diff --git a/.nojekyll b/.nojekyll new file mode 100644 index 0000000..e69de29 diff --git a/CNAME b/CNAME new file mode 100644 index 0000000..2757fda --- /dev/null +++ b/CNAME @@ -0,0 +1 @@ +uc-ebook.org \ No newline at end of file diff --git a/_app/assets/pages/__layout.svelte-c9f861a9.css b/_app/assets/pages/__layout.svelte-c9f861a9.css new file mode 100644 index 0000000..2af3d3b --- /dev/null +++ b/_app/assets/pages/__layout.svelte-c9f861a9.css @@ -0,0 +1 @@ +*,:before,:after{box-sizing:border-box;border-width:0;border-style:solid;border-color:#e5e7eb}:before,:after{--tw-content: ""}html{line-height:1.5;-webkit-text-size-adjust:100%;-moz-tab-size:4;-o-tab-size:4;tab-size:4;font-family:ui-sans-serif,system-ui,-apple-system,BlinkMacSystemFont,Segoe UI,Roboto,Helvetica Neue,Arial,Noto Sans,sans-serif,"Apple Color Emoji","Segoe UI Emoji",Segoe UI Symbol,"Noto Color Emoji"}body{margin:0;line-height:inherit}hr{height:0;color:inherit;border-top-width:1px}abbr:where([title]){-webkit-text-decoration:underline dotted;text-decoration:underline dotted}h1,h2,h3,h4,h5,h6{font-size:inherit;font-weight:inherit}a{color:inherit;text-decoration:inherit}b,strong{font-weight:bolder}code,kbd,samp,pre{font-family:ui-monospace,SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,monospace;font-size:1em}small{font-size:80%}sub,sup{font-size:75%;line-height:0;position:relative;vertical-align:baseline}sub{bottom:-.25em}sup{top:-.5em}table{text-indent:0;border-color:inherit;border-collapse:collapse}button,input,optgroup,select,textarea{font-family:inherit;font-size:100%;line-height:inherit;color:inherit;margin:0;padding:0}button,select{text-transform:none}button,[type=button],[type=reset],[type=submit]{-webkit-appearance:button;background-color:transparent;background-image:none}:-moz-focusring{outline:auto}:-moz-ui-invalid{box-shadow:none}progress{vertical-align:baseline}::-webkit-inner-spin-button,::-webkit-outer-spin-button{height:auto}[type=search]{-webkit-appearance:textfield;outline-offset:-2px}::-webkit-search-decoration{-webkit-appearance:none}::-webkit-file-upload-button{-webkit-appearance:button;font:inherit}summary{display:list-item}blockquote,dl,dd,h1,h2,h3,h4,h5,h6,hr,figure,p,pre{margin:0}fieldset{margin:0;padding:0}legend{padding:0}ol,ul,menu{list-style:none;margin:0;padding:0}textarea{resize:vertical}input::-moz-placeholder,textarea::-moz-placeholder{opacity:1;color:#9ca3af}input:-ms-input-placeholder,textarea:-ms-input-placeholder{opacity:1;color:#9ca3af}input::placeholder,textarea::placeholder{opacity:1;color:#9ca3af}button,[role=button]{cursor:pointer}:disabled{cursor:default}img,svg,video,canvas,audio,iframe,embed,object{display:block;vertical-align:middle}img,video{max-width:100%;height:auto}[hidden]{display:none}*,:before,:after{--tw-translate-x: 0;--tw-translate-y: 0;--tw-rotate: 0;--tw-skew-x: 0;--tw-skew-y: 0;--tw-scale-x: 1;--tw-scale-y: 1;--tw-pan-x: ;--tw-pan-y: ;--tw-pinch-zoom: ;--tw-scroll-snap-strictness: proximity;--tw-ordinal: ;--tw-slashed-zero: ;--tw-numeric-figure: ;--tw-numeric-spacing: ;--tw-numeric-fraction: ;--tw-ring-inset: ;--tw-ring-offset-width: 0px;--tw-ring-offset-color: #fff;--tw-ring-color: rgb(59 130 246 / .5);--tw-ring-offset-shadow: 0 0 #0000;--tw-ring-shadow: 0 0 #0000;--tw-shadow: 0 0 #0000;--tw-shadow-colored: 0 0 #0000;--tw-blur: ;--tw-brightness: ;--tw-contrast: ;--tw-grayscale: ;--tw-hue-rotate: ;--tw-invert: ;--tw-saturate: ;--tw-sepia: ;--tw-drop-shadow: ;--tw-backdrop-blur: ;--tw-backdrop-brightness: ;--tw-backdrop-contrast: 
;--tw-backdrop-grayscale: ;--tw-backdrop-hue-rotate: ;--tw-backdrop-invert: ;--tw-backdrop-opacity: ;--tw-backdrop-saturate: ;--tw-backdrop-sepia: }.m-4{margin:1rem}.mx-6{margin-left:1.5rem;margin-right:1.5rem}.my-6{margin-top:1.5rem;margin-bottom:1.5rem}.my-3{margin-top:.75rem;margin-bottom:.75rem}.mx-3{margin-left:.75rem;margin-right:.75rem}.mb-4{margin-bottom:1rem}.mt-6{margin-top:1.5rem}.mb-12{margin-bottom:3rem}.mb-3{margin-bottom:.75rem}.mt-3{margin-top:.75rem}.flex{display:flex}.hidden{display:none}.h-16{height:4rem}.h-20{height:5rem}.h-36{height:9rem}.w-full{width:100%}.w-80{width:20rem}.w-6{width:1.5rem}.w-64{width:16rem}.w-20{width:5rem}.max-w-screen-xl{max-width:1280px}.max-w-xs{max-width:20rem}.max-w-4xl{max-width:56rem}.flex-none{flex:none}.flex-row{flex-direction:row}.flex-col{flex-direction:column}.flex-wrap{flex-wrap:wrap}.items-start{align-items:flex-start}.items-center{align-items:center}.items-stretch{align-items:stretch}.justify-start{justify-content:flex-start}.justify-center{justify-content:center}.justify-around{justify-content:space-around}.overflow-auto{overflow:auto}.rounded-md{border-radius:.375rem}.rounded-lg{border-radius:.5rem}.rounded-l-md{border-top-left-radius:.375rem;border-bottom-left-radius:.375rem}.rounded-r-md{border-top-right-radius:.375rem;border-bottom-right-radius:.375rem}.rounded-br-md{border-bottom-right-radius:.375rem}.border-y-4{border-top-width:4px;border-bottom-width:4px}.border-double{border-style:double}.border-slate-50{--tw-border-opacity: 1;border-color:rgb(248 250 252 / var(--tw-border-opacity))}.bg-slate-900{--tw-bg-opacity: 1;background-color:rgb(15 23 42 / var(--tw-bg-opacity))}.bg-slate-50{--tw-bg-opacity: 1;background-color:rgb(248 250 252 / var(--tw-bg-opacity))}.bg-sky-700{--tw-bg-opacity: 1;background-color:rgb(3 105 161 / var(--tw-bg-opacity))}.bg-no-repeat{background-repeat:no-repeat}.object-contain{-o-object-fit:contain;object-fit:contain}.p-2{padding:.5rem}.py-6{padding-top:1.5rem;padding-bottom:1.5rem}.px-3{padding-left:.75rem;padding-right:.75rem}.py-12{padding-top:3rem;padding-bottom:3rem}.pl-3{padding-left:.75rem}.pb-1{padding-bottom:.25rem}.pt-1{padding-top:.25rem}.pb-6{padding-bottom:1.5rem}.pt-3{padding-top:.75rem}.pb-12{padding-bottom:3rem}.text-left{text-align:left}.text-center{text-align:center}.font-\[\'ornaments\'\]{font-family:ornaments}.text-sm{font-size:.875rem;line-height:1.25rem}.text-3xl{font-size:1.875rem;line-height:2.25rem}.text-xl{font-size:1.25rem;line-height:1.75rem}.text-4xl{font-size:2.25rem;line-height:2.5rem}.text-2xl{font-size:1.5rem;line-height:2rem}.text-lg{font-size:1.125rem;line-height:1.75rem}.font-bold{font-weight:700}.italic{font-style:italic}.leading-tight{line-height:1.25}.text-slate-900{--tw-text-opacity: 1;color:rgb(15 23 42 / var(--tw-text-opacity))}.text-slate-50{--tw-text-opacity: 1;color:rgb(248 250 252 / var(--tw-text-opacity))}.text-slate-500{--tw-text-opacity: 1;color:rgb(100 116 139 / var(--tw-text-opacity))}.text-slate-400{--tw-text-opacity: 1;color:rgb(148 163 184 / var(--tw-text-opacity))}.shadow-md{--tw-shadow: 0 4px 6px -1px rgb(0 0 0 / .1), 0 2px 4px -2px rgb(0 0 0 / .1);--tw-shadow-colored: 0 4px 6px -1px var(--tw-shadow-color), 0 2px 4px -2px var(--tw-shadow-color);box-shadow:var(--tw-ring-offset-shadow, 0 0 #0000),var(--tw-ring-shadow, 0 0 #0000),var(--tw-shadow)}.drop-shadow{--tw-drop-shadow: drop-shadow(0 1px 2px rgb(0 0 0 / .1)) drop-shadow(0 1px 1px rgb(0 0 0 / .06));filter:var(--tw-blur) var(--tw-brightness) var(--tw-contrast) var(--tw-grayscale) 
var(--tw-hue-rotate) var(--tw-invert) var(--tw-saturate) var(--tw-sepia) var(--tw-drop-shadow)}.backdrop-blur-sm{--tw-backdrop-blur: blur(4px);-webkit-backdrop-filter:var(--tw-backdrop-blur) var(--tw-backdrop-brightness) var(--tw-backdrop-contrast) var(--tw-backdrop-grayscale) var(--tw-backdrop-hue-rotate) var(--tw-backdrop-invert) var(--tw-backdrop-opacity) var(--tw-backdrop-saturate) var(--tw-backdrop-sepia);backdrop-filter:var(--tw-backdrop-blur) var(--tw-backdrop-brightness) var(--tw-backdrop-contrast) var(--tw-backdrop-grayscale) var(--tw-backdrop-hue-rotate) var(--tw-backdrop-invert) var(--tw-backdrop-opacity) var(--tw-backdrop-saturate) var(--tw-backdrop-sepia)}@font-face{font-family:ornaments;src:url(/fonts/nymphette-webfont.woff) format("woff"),url(/fonts/nymphette-webfont.woff2) format("woff"),url(fonts/nymphette-webfont.woff) format("woff"),url(fonts/nymphette-webfont.woff2) format("woff")}.hover\:cursor-pointer:hover{cursor:pointer}.hover\:bg-sky-500:hover{--tw-bg-opacity: 1;background-color:rgb(14 165 233 / var(--tw-bg-opacity))}.hover\:text-sky-700:hover{--tw-text-opacity: 1;color:rgb(3 105 161 / var(--tw-text-opacity))}.active\:bg-orange-400:active{--tw-bg-opacity: 1;background-color:rgb(251 146 60 / var(--tw-bg-opacity))}.group:hover .group-hover\:text-sky-500{--tw-text-opacity: 1;color:rgb(14 165 233 / var(--tw-text-opacity))}@media (min-width: 768px){.md\:block{display:block}.md\:w-96{width:24rem}}@media (min-width: 1024px){.lg\:mr-6{margin-right:1.5rem}.lg\:mb-0{margin-bottom:0}.lg\:mt-6{margin-top:1.5rem}.lg\:flex-row{flex-direction:row}.lg\:items-start{align-items:flex-start}.lg\:pl-56{padding-left:14rem}.lg\:pb-6{padding-bottom:1.5rem}.lg\:text-left{text-align:left}}@media (min-width: 1280px){.xl\:rounded-md{border-radius:.375rem}} diff --git a/_app/assets/pages/index.svelte-7f4057b4.css b/_app/assets/pages/index.svelte-7f4057b4.css new file mode 100644 index 0000000..3bfc066 --- /dev/null +++ b/_app/assets/pages/index.svelte-7f4057b4.css @@ -0,0 +1 @@ +.icon path{stroke-width:1}.text-shadow.svelte-uz1u3c{text-shadow:1px 1px 1px rgba(33,33,33,.5)} diff --git a/_app/chunks/vendor-80631b75.js b/_app/chunks/vendor-80631b75.js new file mode 100644 index 0000000..ce488f8 --- /dev/null +++ b/_app/chunks/vendor-80631b75.js @@ -0,0 +1 @@ +function j(){}function w(e,t){for(const n in t)e[n]=t[n];return e}function Me(e){return e()}function ze(){return Object.create(null)}function I(e){e.forEach(Me)}function Ue(e){return typeof e=="function"}function Ee(e,t){return e!=e?t==t:e!==t||e&&typeof e=="object"||typeof e=="function"}let W;function mt(e,t){return W||(W=document.createElement("a")),W.href=t,e===W.href}function Xe(e){return Object.keys(e).length===0}function gt(e,t,n,i){if(e){const l=je(e,t,n,i);return e[0](l)}}function je(e,t,n,i){return e[1]&&i?w(n.ctx.slice(),e[1](i(t))):n.ctx}function pt(e,t,n,i){if(e[2]&&i){const l=e[2](i(n));if(t.dirty===void 0)return l;if(typeof l=="object"){const s=[],f=Math.max(t.dirty.length,l.length);for(let u=0;u32){const t=[],n=e.ctx.length/32;for(let i=0;i>1);n(l)<=i?e=l+1:t=l}return e}function tt(e){if(e.hydrate_init)return;e.hydrate_init=!0;let t=e.childNodes;if(e.nodeName==="HEAD"){const c=[];for(let a=0;a0&&t[n[l]].claim_order<=a?l+1:et(1,l,m=>t[n[m]].claim_order,a))-1;i[c]=n[_]+1;const p=_+1;n[p]=c,l=Math.max(p,l)}const s=[],f=[];let u=t.length-1;for(let c=n[l]+1;c!=0;c=i[c-1]){for(s.push(t[c-1]);u>=c;u--)f.push(t[u]);u--}for(;u>=0;u--)f.push(t[u]);s.reverse(),f.sort((c,a)=>c.claim_order-a.claim_order);for(let 
c=0,a=0;c=s[a].claim_order;)a++;const _=ae.removeEventListener(t,n,i)}function lt(e,t,n){n==null?e.removeAttribute(t):e.getAttribute(t)!==n&&e.setAttribute(t,n)}function x(e,t){for(const n in t)lt(e,n,t[n])}function E(e){return Array.from(e.childNodes)}function it(e){e.claim_info===void 0&&(e.claim_info={last_index:0,total_claimed:0})}function Be(e,t,n,i,l=!1){it(e);const s=(()=>{for(let f=e.claim_info.last_index;f=0;f--){const u=e[f];if(t(u)){const c=n(u);return c===void 0?e.splice(f,1):e[f]=c,l?c===void 0&&e.claim_info.last_index--:e.claim_info.last_index=f,u}}return i()})();return s.claim_order=e.claim_info.total_claimed,e.claim_info.total_claimed+=1,s}function Se(e,t,n,i){return Be(e,l=>l.nodeName===t,l=>{const s=[];for(let f=0;fl.removeAttribute(f))},()=>i(t))}function xt(e,t,n){return Se(e,t,n,nt)}function B(e,t,n){return Se(e,t,n,z)}function ot(e,t){return Be(e,n=>n.nodeType===3,n=>{const i=""+t;if(n.data.startsWith(i)){if(n.data.length!==i.length)return n.splitText(i.length)}else n.data=i},()=>U(t),!0)}function wt(e){return ot(e," ")}function Ct(e,t){t=""+t,e.wholeText!==t&&(e.data=t)}function Mt(e,t,n,i){n===null?e.style.removeProperty(t):e.style.setProperty(t,n,i?"important":"")}function zt(e,t){for(let n=0;n{Q.delete(e),i&&(n&&e.d(1),i())}),e.o(t)}}function N(e,t){const n={},i={},l={$$scope:1};let s=e.length;for(;s--;){const f=e[s],u=t[s];if(u){for(const c in f)c in u||(i[c]=1);for(const c in u)l[c]||(n[c]=u[c],l[c]=1);e[s]=u}else for(const c in f)l[c]=1}for(const f in i)f in n||(n[f]=void 0);return n}function Vt(e){return typeof e=="object"&&e!==null?e:{}}function qt(e){e&&e.c()}function Tt(e,t){e&&e.l(t)}function at(e,t,n,i){const{fragment:l,on_mount:s,on_destroy:f,after_update:u}=e.$$;l&&l.m(t,n),i||Z(()=>{const c=s.map(Me).filter(Ue);f?f.push(...c):I(c),e.$$.on_mount=[]}),u.forEach(Z)}function st(e,t){const n=e.$$;n.fragment!==null&&(I(n.on_destroy),n.fragment&&n.fragment.d(t),n.on_destroy=n.fragment=null,n.ctx=[])}function ut(e,t){e.$$.dirty[0]===-1&&(D.push(e),Ve(),e.$$.dirty.fill(0)),e.$$.dirty[t/31|0]|=1<{const g=M.length?M[0]:m;return a.ctx&&l(a.ctx[p],a.ctx[p]=g)&&(!a.skip_bound&&a.bound[p]&&a.bound[p](g),_&&ut(e,p)),m}):[],a.update(),_=!0,I(a.before_update),a.fragment=i?i(a.ctx):!1,t.target){if(t.hydrate){Ze();const p=E(t.target);a.fragment&&a.fragment.l(p),p.forEach(b)}else a.fragment&&a.fragment.c();t.intro&&ct(e.$$.fragment),at(e,t.target,t.anchor,t.customElement),$e(),qe()}P(c)}class ht{$destroy(){st(this,1),this.$destroy=j}$on(t,n){const i=this.$$.callbacks[t]||(this.$$.callbacks[t]=[]);return i.push(n),()=>{const l=i.indexOf(n);l!==-1&&i.splice(l,1)}}$set(t){this.$$set&&!Xe(t)&&(this.$$.skip_bound=!0,this.$$set(t),this.$$.skip_bound=!1)}}const T=[];function It(e,t=j){let n;const i=new Set;function l(u){if(Ee(e,u)&&(e=u,n)){const c=!T.length;for(const a of i)a[1](),T.push(a,e);if(c){for(let a=0;a{i.delete(a),i.size===0&&(n(),n=null)}}return{set:l,update:s,subscribe:f}}function Te(e,t,n){const i=e.slice();return i[5]=t[n],i}function Ie(e,t,n){const i=e.slice();return i[5]=t[n],i}function Oe(e,t,n){const i=e.slice();return i[5]=t[n],i}function Pe(e,t,n){const i=e.slice();return i[5]=t[n],i}function De(e,t,n){const i=e.slice();return i[5]=t[n],i}function Fe(e,t,n){const i=e.slice();return i[5]=t[n],i}function We(e){let t,n=[e[5]],i={};for(let l=0;l{t=w(w({},t),Ye(a)),n(2,s=Ae(t,l)),"src"in a&&n(3,f=a.src),"size"in a&&n(0,u=a.size),"theme"in a&&n(4,c=a.theme)},e.$$.update=()=>{var a;e.$$.dirty&24&&n(1,i=(a=f==null?void 0:f[c])!=null?a:f==null?void 
0:f.default)},[u,i,s,f,c]}class Ot extends ht{constructor(t){super();ft(this,t,dt,_t,Ee,{src:3,size:0,theme:4})}}const Pt={default:{a:{fill:"none",viewBox:"0 0 24 24",stroke:"currentColor","aria-hidden":"true"},path:[{"stroke-linecap":"round","stroke-linejoin":"round","stroke-width":"2",d:"M12 6.253v13m0-13C10.832 5.477 9.246 5 7.5 5S4.168 5.477 3 6.253v13C4.168 18.477 5.754 18 7.5 18s3.332.477 4.5 1.253m0-13C13.168 5.477 14.754 5 16.5 5c1.747 0 3.332.477 4.5 1.253v13C19.832 18.477 18.247 18 16.5 18c-1.746 0-3.332.477-4.5 1.253"}]},solid:{a:{viewBox:"0 0 20 20",fill:"currentColor","aria-hidden":"true"},path:[{d:"M9 4.804A7.968 7.968 0 005.5 4c-1.255 0-2.443.29-3.5.804v10A7.969 7.969 0 015.5 14c1.669 0 3.218.51 4.5 1.385A7.962 7.962 0 0114.5 14c1.255 0 2.443.29 3.5.804v-10A7.968 7.968 0 0014.5 4c-1.255 0-2.443.29-3.5.804V12a1 1 0 11-2 0V4.804z"}]}},Dt={default:{a:{fill:"none",viewBox:"0 0 24 24",stroke:"currentColor","aria-hidden":"true"},path:[{"stroke-linecap":"round","stroke-linejoin":"round","stroke-width":"2",d:"M8 5H6a2 2 0 00-2 2v12a2 2 0 002 2h10a2 2 0 002-2v-1M8 5a2 2 0 002 2h2a2 2 0 002-2M8 5a2 2 0 012-2h2a2 2 0 012 2m0 0h2a2 2 0 012 2v3m2 4H10m0 0l3-3m-3 3l3 3"}]},solid:{a:{viewBox:"0 0 20 20",fill:"currentColor","aria-hidden":"true"},path:[{d:"M8 2a1 1 0 000 2h2a1 1 0 100-2H8z"},{d:"M3 5a2 2 0 012-2 3 3 0 003 3h2a3 3 0 003-3 2 2 0 012 2v6h-4.586l1.293-1.293a1 1 0 00-1.414-1.414l-3 3a1 1 0 000 1.414l3 3a1 1 0 001.414-1.414L10.414 13H15v3a2 2 0 01-2 2H5a2 2 0 01-2-2V5zM15 11h2a1 1 0 110 2h-2v-2z"}]}},Ft={default:{a:{fill:"none",viewBox:"0 0 24 24",stroke:"currentColor","aria-hidden":"true"},path:[{"stroke-linecap":"round","stroke-linejoin":"round","stroke-width":"2",d:"M10 20l4-16m4 4l4 4-4 4M6 16l-4-4 4-4"}]},solid:{a:{viewBox:"0 0 20 20",fill:"currentColor","aria-hidden":"true"},path:[{"fill-rule":"evenodd",d:"M12.316 3.051a1 1 0 01.633 1.265l-4 12a1 1 0 11-1.898-.632l4-12a1 1 0 011.265-.633zM5.707 6.293a1 1 0 010 1.414L3.414 10l2.293 2.293a1 1 0 11-1.414 1.414l-3-3a1 1 0 010-1.414l3-3a1 1 0 011.414 0zm8.586 0a1 1 0 011.414 0l3 3a1 1 0 010 1.414l-3 3a1 1 0 11-1.414-1.414L16.586 10l-2.293-2.293a1 1 0 010-1.414z","clip-rule":"evenodd"}]}},Wt={default:{a:{fill:"none",viewBox:"0 0 24 24",stroke:"currentColor","aria-hidden":"true"},path:[{"stroke-linecap":"round","stroke-linejoin":"round","stroke-width":"2",d:"M12 10v6m0 0l-3-3m3 3l3-3m2 8H7a2 2 0 01-2-2V5a2 2 0 012-2h5.586a1 1 0 01.707.293l5.414 5.414a1 1 0 01.293.707V19a2 2 0 01-2 2z"}]},solid:{a:{viewBox:"0 0 20 20",fill:"currentColor","aria-hidden":"true"},path:[{"fill-rule":"evenodd",d:"M6 2a2 2 0 00-2 2v12a2 2 0 002 2h8a2 2 0 002-2V7.414A2 2 0 0015.414 6L12 2.586A2 2 0 0010.586 2H6zm5 6a1 1 0 10-2 0v3.586l-1.293-1.293a1 1 0 10-1.414 1.414l3 3a1 1 0 001.414 0l3-3a1 1 0 00-1.414-1.414L11 11.586V8z","clip-rule":"evenodd"}]}},Gt={default:{a:{viewBox:"0 0 24 24",fill:"none",stroke:"currentColor","stroke-width":"2","stroke-linecap":"round","stroke-linejoin":"round"},path:[{d:"M18 2h-3a5 5 0 0 0-5 5v3H7v4h3v8h4v-8h3l1-4h-4V7a1 1 0 0 1 1-1h3z"}]}},Jt={default:{a:{viewBox:"0 0 24 24",fill:"none",stroke:"currentColor","stroke-width":"2","stroke-linecap":"round","stroke-linejoin":"round"},path:[{d:"M16 8a6 6 0 0 1 6 6v7h-4v-7a2 2 0 0 0-2-2 2 2 0 0 0-2 2v7h-4v-7a6 6 0 0 1 6-6z"}],rect:[{x:"2",y:"9",width:"4",height:"12"}],circle:[{cx:"4",cy:"4",r:"2"}]}},Kt={default:{a:{viewBox:"0 0 24 24",fill:"none",stroke:"currentColor","stroke-width":"2","stroke-linecap":"round","stroke-linejoin":"round"},path:[{d:"M23 3a10.9 10.9 0 0 1-3.14 1.53 4.48 
4.48 0 0 0-7.86 3v1A10.66 10.66 0 0 1 3 4s-4 9 5 13a11.64 11.64 0 0 1-7 2c9 5 20 0 20-11.5a4.5 4.5 0 0 0-.08-.83A7.72 7.72 0 0 0 23 3z"}]}};export{Vt as A,st as B,w as C,It as D,St as E,gt as F,vt as G,yt as H,pt as I,V as J,j as K,Ot as L,Dt as M,Z as N,zt as O,bt as P,q as Q,I as R,ht as S,Et as T,mt as U,Kt as V,Gt as W,Jt as X,Pt as Y,Wt as Z,Ft as _,E as a,lt as b,xt as c,b as d,nt as e,Mt as f,A as g,ot as h,ft as i,Ct as j,kt as k,C as l,wt as m,Nt as n,Lt as o,Ht as p,ct as q,Bt as r,Ee as s,U as t,At as u,jt as v,qt as w,Tt as x,at as y,N as z}; diff --git a/_app/error.svelte-bcbf8e93.js b/_app/error.svelte-bcbf8e93.js new file mode 100644 index 0000000..4f0f8d4 --- /dev/null +++ b/_app/error.svelte-bcbf8e93.js @@ -0,0 +1 @@ +import{S as K,i as w,s as y,e as v,t as E,c as d,a as b,h as P,d as m,g as n,J as R,j,k as N,l as q,m as S,K as C}from"./chunks/vendor-80631b75.js";function H(r){let l,t=r[1].frame+"",a;return{c(){l=v("pre"),a=E(t)},l(f){l=d(f,"PRE",{});var s=b(l);a=P(s,t),s.forEach(m)},m(f,s){n(f,l,s),R(l,a)},p(f,s){s&2&&t!==(t=f[1].frame+"")&&j(a,t)},d(f){f&&m(l)}}}function J(r){let l,t=r[1].stack+"",a;return{c(){l=v("pre"),a=E(t)},l(f){l=d(f,"PRE",{});var s=b(l);a=P(s,t),s.forEach(m)},m(f,s){n(f,l,s),R(l,a)},p(f,s){s&2&&t!==(t=f[1].stack+"")&&j(a,t)},d(f){f&&m(l)}}}function z(r){let l,t,a,f,s=r[1].message+"",c,k,u,p,i=r[1].frame&&H(r),o=r[1].stack&&J(r);return{c(){l=v("h1"),t=E(r[0]),a=N(),f=v("pre"),c=E(s),k=N(),i&&i.c(),u=N(),o&&o.c(),p=q()},l(e){l=d(e,"H1",{});var _=b(l);t=P(_,r[0]),_.forEach(m),a=S(e),f=d(e,"PRE",{});var h=b(f);c=P(h,s),h.forEach(m),k=S(e),i&&i.l(e),u=S(e),o&&o.l(e),p=q()},m(e,_){n(e,l,_),R(l,t),n(e,a,_),n(e,f,_),R(f,c),n(e,k,_),i&&i.m(e,_),n(e,u,_),o&&o.m(e,_),n(e,p,_)},p(e,[_]){_&1&&j(t,e[0]),_&2&&s!==(s=e[1].message+"")&&j(c,s),e[1].frame?i?i.p(e,_):(i=H(e),i.c(),i.m(u.parentNode,u)):i&&(i.d(1),i=null),e[1].stack?o?o.p(e,_):(o=J(e),o.c(),o.m(p.parentNode,p)):o&&(o.d(1),o=null)},i:C,o:C,d(e){e&&m(l),e&&m(a),e&&m(f),e&&m(k),i&&i.d(e),e&&m(u),o&&o.d(e),e&&m(p)}}}function D({error:r,status:l}){return{props:{error:r,status:l}}}function A(r,l,t){let{status:a}=l,{error:f}=l;return r.$$set=s=>{"status"in s&&t(0,a=s.status),"error"in s&&t(1,f=s.error)},[a,f]}class F extends K{constructor(l){super();w(this,l,A,z,y,{status:0,error:1})}}export{F as default,D as load}; diff --git a/_app/manifest.json b/_app/manifest.json new file mode 100644 index 0000000..6743374 --- /dev/null +++ b/_app/manifest.json @@ -0,0 +1,51 @@ +{ + ".svelte-kit/runtime/client/start.js": { + "file": "start-e1e0ba09.js", + "src": ".svelte-kit/runtime/client/start.js", + "isEntry": true, + "imports": [ + "_vendor-80631b75.js" + ], + "dynamicImports": [ + "src/routes/__layout.svelte", + ".svelte-kit/runtime/components/error.svelte", + "src/routes/index.svelte" + ] + }, + "src/routes/__layout.svelte": { + "file": "pages/__layout.svelte-3f8aeddf.js", + "src": "src/routes/__layout.svelte", + "isEntry": true, + "isDynamicEntry": true, + "imports": [ + "_vendor-80631b75.js" + ], + "css": [ + "assets/pages/__layout.svelte-c9f861a9.css" + ] + }, + ".svelte-kit/runtime/components/error.svelte": { + "file": "error.svelte-bcbf8e93.js", + "src": ".svelte-kit/runtime/components/error.svelte", + "isEntry": true, + "isDynamicEntry": true, + "imports": [ + "_vendor-80631b75.js" + ] + }, + "src/routes/index.svelte": { + "file": "pages/index.svelte-255aa6ea.js", + "src": "src/routes/index.svelte", + "isEntry": true, + "isDynamicEntry": true, + "imports": [ + "_vendor-80631b75.js" + ], + "css": [ + 
"assets/pages/index.svelte-7f4057b4.css" + ] + }, + "_vendor-80631b75.js": { + "file": "chunks/vendor-80631b75.js" + } +} \ No newline at end of file diff --git a/_app/pages/__layout.svelte-3f8aeddf.js b/_app/pages/__layout.svelte-3f8aeddf.js new file mode 100644 index 0000000..13ceb37 --- /dev/null +++ b/_app/pages/__layout.svelte-3f8aeddf.js @@ -0,0 +1 @@ +import{S as l,i,s as r,F as u,G as f,H as _,I as c,q as p,o as d}from"../chunks/vendor-80631b75.js";function m(n){let s;const o=n[1].default,e=u(o,n,n[0],null);return{c(){e&&e.c()},l(t){e&&e.l(t)},m(t,a){e&&e.m(t,a),s=!0},p(t,[a]){e&&e.p&&(!s||a&1)&&f(e,o,t,t[0],s?c(o,t[0],a,null):_(t[0]),null)},i(t){s||(p(e,t),s=!0)},o(t){d(e,t),s=!1},d(t){e&&e.d(t)}}}function $(n,s,o){let{$$slots:e={},$$scope:t}=s;return n.$$set=a=>{"$$scope"in a&&o(0,t=a.$$scope)},[t,e]}class h extends l{constructor(s){super();i(this,s,$,m,r,{})}}export{h as default}; diff --git a/_app/pages/index.svelte-255aa6ea.js b/_app/pages/index.svelte-255aa6ea.js new file mode 100644 index 0000000..0900287 --- /dev/null +++ b/_app/pages/index.svelte-255aa6ea.js @@ -0,0 +1,20 @@ +import{S as Kt,i as Ct,s as Ot,e as r,t as y,c as i,a as o,h as b,d as s,g as B,J as e,K as ft,L as Te,M as Zt,k as w,w as Ve,m as k,x as Se,N as Xt,b as a,f as pe,O as Pt,y as Me,P as Gt,j as Qt,q as N,o as Y,Q as ht,B as Ae,R as Wt,T as Yt,U as Ut,V as el,W as tl,X as ll,p as sl,v as al,n as rl,Y as il,Z as nl,_ as ol}from"../chunks/vendor-80631b75.js";function Ht(d,l,c){const n=d.slice();return n[0]=l[c],n}function Ft(d){let l,c=d[0]+"",n,p;return{c(){l=r("option"),n=y(c),this.h()},l(m){l=i(m,"OPTION",{});var u=o(l);n=b(u,c),u.forEach(s),this.h()},h(){l.__value=p=d[0],l.value=l.__value},m(m,u){B(m,l,u),e(l,n)},p:ft,d(m){m&&s(l)}}}function cl(d){let l,c,n,p,m,u,g,E=(d[1][d[0]]||"")+"",j,A,M,I,R,_,$,S=Object.keys(d[1]),x=[];for(let h=0;hd[3].call(n)),a(c,"class","mb-4"),a(g,"class","m-4 text-sm text-slate-900 w-80 md:w-96 text-left leading-tight "),pe(u,"box-shadow","inset 0 2px 4px 0 rgb(0 0 0 / 0.2)"),a(u,"class","bg-slate-50 rounded-l-md overflow-auto"),a(M,"title","Copy to clipboard"),a(M,"class","rounded-r-md drop-shadow p-2 bg-sky-700 hover:bg-sky-500 hover:cursor-pointer active:bg-orange-400"),a(m,"class","flex flex-row items-stretch justify-center"),a(l,"class","flex flex-col items-center justify-center")},m(h,T){B(h,l,T),e(l,c),e(c,n);for(let D=0;D{try{navigator.clipboard.writeText(p[n])}catch{}};function u(){n=Yt(this),c(0,n),c(1,p)}return[n,p,m,u]}class ul extends Kt{constructor(l){super();Ct(this,l,dl,cl,Ot,{})}}function zt(d,l,c){const n=d.slice();return n[6]=l[c],n[8]=c,n}function Lt(d,l,c){const n=d.slice();return n[9]=l[c],n}function qt(d){let l,c,n,p,m=d[9].title+"",u,g,E,j=d[9].text+"",A,M,I,R;return c=new Te({props:{src:d[9].icon,class:"icon w-20 text-slate-50 group-hover:text-sky-500"}}),{c(){l=r("a"),Ve(c.$$.fragment),n=w(),p=r("div"),u=y(m),g=w(),E=r("div"),A=y(j),M=w(),this.h()},l(_){l=i(_,"A",{target:!0,href:!0,class:!0});var $=o(l);Se(c.$$.fragment,$),n=k($),p=i($,"DIV",{class:!0});var S=o(p);u=b(S,m),S.forEach(s),g=k($),E=i($,"DIV",{class:!0});var x=o(E);A=b(x,j),x.forEach(s),M=k($),$.forEach(s),this.h()},h(){a(p,"class","text-xl font-bold text-slate-50 group-hover:text-sky-500"),a(E,"class","text-md text-slate-50 group-hover:text-sky-500"),a(l,"target","_blank"),a(l,"href",I=d[9].url),a(l,"class","flex-none flex flex-col items-center justify-center w-64 pt-3 pb-6 
group")},m(_,$){B(_,l,$),Me(c,l,null),e(l,n),e(l,p),e(p,u),e(l,g),e(l,E),e(E,A),e(l,M),R=!0},p:ft,i(_){R||(N(c.$$.fragment,_),R=!0)},o(_){Y(c.$$.fragment,_),R=!1},d(_){_&&s(l),Ae(c)}}}function Jt(d){let l,c,n,p,m=d[8]+1+"",u,g,E,j=d[6].text+"",A,M,I,R,_;return{c(){l=r("div"),c=r("div"),n=r("div"),p=r("div"),u=y(m),g=w(),E=r("div"),A=y(j),M=w(),I=r("img"),_=w(),this.h()},l($){l=i($,"DIV",{class:!0});var S=o(l);c=i(S,"DIV",{class:!0});var x=o(c);n=i(x,"DIV",{class:!0});var h=o(n);p=i(h,"DIV",{class:!0});var T=o(p);u=b(T,m),T.forEach(s),h.forEach(s),g=k(x),E=i(x,"DIV",{class:!0});var D=o(E);A=b(D,j),D.forEach(s),x.forEach(s),M=k(S),I=i(S,"IMG",{class:!0,src:!0,alt:!0}),_=k(S),S.forEach(s),this.h()},h(){a(p,"class","text-4xl font-bold text-slate-50 text-shadow svelte-uz1u3c"),a(n,"class","bg-slate-900 w-20 h-20 flex-none flex items-center justify-center rounded-br-md"),a(E,"class","text-md text-slate-900 mx-3 mt-3 leading-tight"),a(c,"class","flex flex-row items-start justify-start text-left mb-3"),a(I,"class","w-full h-36 my-3 object-contain"),Ut(I.src,R=d[6].image)||a(I,"src",R),a(I,"alt",""),a(l,"class","bg-slate-50 w-80 flex-none mb-12 lg:mb-0 shadow-md rounded-md")},m($,S){B($,l,S),e(l,c),e(c,n),e(n,p),e(p,u),e(c,g),e(c,E),e(E,A),e(l,M),e(l,I),e(l,_)},p:ft,d($){$&&s(l)}}}function fl(d){let l,c,n,p,m,u,g,E,j,A,M,I,R,_,$,S,x,h,T,D,P,Z,ee,C,F,te,Fe,ge,ze,Le,_e,qe,je,z,xe,Je,Ne,le,Re,L,ye,Ze,Xe,se,Be,q,be,Qe,We,U,X,ae,Ye,Q,re,et,W,ie,Ke,J,we,tt,lt,ke,ne,Ce,oe,O,st,ce,at,rt,de,it,nt,ue,ot,ct,fe,dt,ut,he,ve=d[3],V=[];for(let t=0;tY(V[t],1,1,()=>{V[t]=null});let Ee=d[4],K=[];for(let t=0;t{var j,A;document.title="Addressing Uncertainty",m=encodeURIComponent((A=(j=window==null?void 0:window.location)==null?void 0:j.href)!=null?A:""),c(0,u=`https://www.facebook.com/sharer.php?u=${m}`),c(1,g=`https://twitter.com/intent/tweet?url=${m}&text=Check%20out%20this%20eBook%3A%20Addressing%20Uncertainty%20in%20MultiSector%20Dynamics%20Research&hashtags=DOE,IM3,MultiSectorDynamics`),c(2,E=`https://www.linkedin.com/shareArticle?mini=true&url=${m}&title=Check%20out%20this%20eBook%3A%20Addressing%20Uncertainty%20in%20MultiSector%20Dynamics%20Research&summary=Open%20access%20eBook%20showcasing%20sensitivity%20analysis%20and%20diagnostic%20model%20evaluation%20techniques.`)}),[u,g,E,n,p]}class pl extends Kt{constructor(l){super();Ct(this,l,hl,fl,Ot,{})}}export{pl as default,ml as router}; diff --git a/_app/start-e1e0ba09.js b/_app/start-e1e0ba09.js new file mode 100644 index 0000000..bdc419a --- /dev/null +++ b/_app/start-e1e0ba09.js @@ -0,0 +1 @@ +var fe=Object.defineProperty,ue=Object.defineProperties;var he=Object.getOwnPropertyDescriptors;var J=Object.getOwnPropertySymbols;var H=Object.prototype.hasOwnProperty,Q=Object.prototype.propertyIsEnumerable;var Z=(o,e,t)=>e in o?fe(o,e,{enumerable:!0,configurable:!0,writable:!0,value:t}):o[e]=t,y=(o,e)=>{for(var t in e||(e={}))H.call(e,t)&&Z(o,t,e[t]);if(J)for(var t of J(e))Q.call(e,t)&&Z(o,t,e[t]);return o},W=(o,e)=>ue(o,he(e));var ee=(o,e)=>{var t={};for(var r in o)H.call(o,r)&&e.indexOf(r)<0&&(t[r]=o[r]);if(o!=null&&J)for(var r of J(o))e.indexOf(r)<0&&Q.call(o,r)&&(t[r]=o[r]);return t};import{S as de,i as _e,s as pe,e as ge,c as me,a as we,d as $,b as z,f as A,g as S,t as be,h as ve,j as ye,k as ke,l as w,m as $e,n as x,o as b,p as O,q as v,r as Ee,u as Re,v as G,w as L,x as P,y as U,z as C,A as V,B as j,C as I,D as K,E as te}from"./chunks/vendor-80631b75.js";function Se(o){let e,t,r;const l=[o[1]||{}];var i=o[0][0];function a(s){let n={};for(let 
c=0;c{j(f,1)}),O()}i?(e=new i(a()),L(e.$$.fragment),v(e.$$.fragment,1),U(e,t.parentNode,t)):e=null}else i&&e.$set(c)},i(s){r||(e&&v(e.$$.fragment,s),r=!0)},o(s){e&&b(e.$$.fragment,s),r=!1},d(s){s&&$(t),e&&j(e,s)}}}function Le(o){let e,t,r;const l=[o[1]||{}];var i=o[0][0];function a(s){let n={$$slots:{default:[Ne]},$$scope:{ctx:s}};for(let c=0;c{j(f,1)}),O()}i?(e=new i(a(s)),L(e.$$.fragment),v(e.$$.fragment,1),U(e,t.parentNode,t)):e=null}else i&&e.$set(c)},i(s){r||(e&&v(e.$$.fragment,s),r=!0)},o(s){e&&b(e.$$.fragment,s),r=!1},d(s){s&&$(t),e&&j(e,s)}}}function Ue(o){let e,t,r;const l=[o[2]||{}];var i=o[0][1];function a(s){let n={};for(let c=0;c{j(f,1)}),O()}i?(e=new i(a()),L(e.$$.fragment),v(e.$$.fragment,1),U(e,t.parentNode,t)):e=null}else i&&e.$set(c)},i(s){r||(e&&v(e.$$.fragment,s),r=!0)},o(s){e&&b(e.$$.fragment,s),r=!1},d(s){s&&$(t),e&&j(e,s)}}}function je(o){let e,t,r;const l=[o[2]||{}];var i=o[0][1];function a(s){let n={$$slots:{default:[Ae]},$$scope:{ctx:s}};for(let c=0;c{j(f,1)}),O()}i?(e=new i(a(s)),L(e.$$.fragment),v(e.$$.fragment,1),U(e,t.parentNode,t)):e=null}else i&&e.$set(c)},i(s){r||(e&&v(e.$$.fragment,s),r=!0)},o(s){e&&b(e.$$.fragment,s),r=!1},d(s){s&&$(t),e&&j(e,s)}}}function Ae(o){let e,t,r;const l=[o[3]||{}];var i=o[0][2];function a(s){let n={};for(let c=0;c{j(f,1)}),O()}i?(e=new i(a()),L(e.$$.fragment),v(e.$$.fragment,1),U(e,t.parentNode,t)):e=null}else i&&e.$set(c)},i(s){r||(e&&v(e.$$.fragment,s),r=!0)},o(s){e&&b(e.$$.fragment,s),r=!1},d(s){s&&$(t),e&&j(e,s)}}}function Ne(o){let e,t,r,l;const i=[je,Ue],a=[];function s(n,c){return n[0][2]?0:1}return e=s(o),t=a[e]=i[e](o),{c(){t.c(),r=w()},l(n){t.l(n),r=w()},m(n,c){a[e].m(n,c),S(n,r,c),l=!0},p(n,c){let f=e;e=s(n),e===f?a[e].p(n,c):(x(),b(a[f],1,1,()=>{a[f]=null}),O(),t=a[e],t?t.p(n,c):(t=a[e]=i[e](n),t.c()),v(t,1),t.m(r.parentNode,r))},i(n){l||(v(t),l=!0)},o(n){b(t),l=!1},d(n){a[e].d(n),n&&$(r)}}}function se(o){let e,t=o[5]&&re(o);return{c(){e=ge("div"),t&&t.c(),this.h()},l(r){e=me(r,"DIV",{id:!0,"aria-live":!0,"aria-atomic":!0,style:!0});var l=we(e);t&&t.l(l),l.forEach($),this.h()},h(){z(e,"id","svelte-announcer"),z(e,"aria-live","assertive"),z(e,"aria-atomic","true"),A(e,"position","absolute"),A(e,"left","0"),A(e,"top","0"),A(e,"clip","rect(0 0 0 0)"),A(e,"clip-path","inset(50%)"),A(e,"overflow","hidden"),A(e,"white-space","nowrap"),A(e,"width","1px"),A(e,"height","1px")},m(r,l){S(r,e,l),t&&t.m(e,null)},p(r,l){r[5]?t?t.p(r,l):(t=re(r),t.c(),t.m(e,null)):t&&(t.d(1),t=null)},d(r){r&&$(e),t&&t.d()}}}function re(o){let e;return{c(){e=be(o[6])},l(t){e=ve(t,o[6])},m(t,r){S(t,e,r)},p(t,r){r&64&&ye(e,t[6])},d(t){t&&$(e)}}}function Te(o){let e,t,r,l,i;const a=[Le,Se],s=[];function n(f,h){return f[0][1]?0:1}e=n(o),t=s[e]=a[e](o);let c=o[4]&&se(o);return{c(){t.c(),r=ke(),c&&c.c(),l=w()},l(f){t.l(f),r=$e(f),c&&c.l(f),l=w()},m(f,h){s[e].m(f,h),S(f,r,h),c&&c.m(f,h),S(f,l,h),i=!0},p(f,[h]){let u=e;e=n(f),e===u?s[e].p(f,h):(x(),b(s[u],1,1,()=>{s[u]=null}),O(),t=s[e],t?t.p(f,h):(t=s[e]=a[e](f),t.c()),v(t,1),t.m(r.parentNode,r)),f[4]?c?c.p(f,h):(c=se(f),c.c(),c.m(l.parentNode,l)):c&&(c.d(1),c=null)},i(f){i||(v(t),i=!0)},o(f){b(t),i=!1},d(f){s[e].d(f),f&&$(r),c&&c.d(f),f&&$(l)}}}function xe(o,e,t){let{stores:r}=e,{page:l}=e,{components:i}=e,{props_0:a=null}=e,{props_1:s=null}=e,{props_2:n=null}=e;Ee("__svelte__",r),Re(r.page.notify);let c=!1,f=!1,h=null;return G(()=>{const u=r.page.subscribe(()=>{c&&(t(5,f=!0),t(6,h=document.title||"untitled page"))});return t(4,c=!0),u}),o.$$set=u=>{"stores"in u&&t(7,r=u.stores),"page"in 
u&&t(8,l=u.page),"components"in u&&t(0,i=u.components),"props_0"in u&&t(1,a=u.props_0),"props_1"in u&&t(2,s=u.props_1),"props_2"in u&&t(3,n=u.props_2)},o.$$.update=()=>{o.$$.dirty&384&&r.page.set(l)},[i,a,s,n,c,f,h,r,l]}class Oe extends de{constructor(e){super();_e(this,e,xe,Te,pe,{stores:7,page:8,components:0,props_0:1,props_1:2,props_2:3})}}const Pe="modulepreload",ie={},Ce="/_app/",M=function(e,t){return!t||t.length===0?e():Promise.all(t.map(r=>{if(r=`${Ce}${r}`,r in ie)return;ie[r]=!0;const l=r.endsWith(".css"),i=l?'[rel="stylesheet"]':"";if(document.querySelector(`link[href="${r}"]${i}`))return;const a=document.createElement("link");if(a.rel=l?"stylesheet":Pe,l||(a.as="script",a.crossOrigin=""),a.href=r,document.head.appendChild(a),l)return new Promise((s,n)=>{a.addEventListener("load",s),a.addEventListener("error",n)})})).then(()=>e())},D=[()=>M(()=>import("./pages/__layout.svelte-3f8aeddf.js"),["pages/__layout.svelte-3f8aeddf.js","assets/pages/__layout.svelte-c9f861a9.css","chunks/vendor-80631b75.js"]),()=>M(()=>import("./error.svelte-bcbf8e93.js"),["error.svelte-bcbf8e93.js","chunks/vendor-80631b75.js"]),()=>M(()=>import("./pages/index.svelte-255aa6ea.js"),["pages/index.svelte-255aa6ea.js","assets/pages/index.svelte-7f4057b4.css","chunks/vendor-80631b75.js"])],Ve=[[/^\/$/,[D[0],D[2]],[D[1]]]],Ie=[D[0](),D[1]()];function De(o){let e=o.baseURI;if(!e){const t=o.getElementsByTagName("base");e=t.length?t[0].href:o.URL}return e}let Y="";function qe(o){Y=o.base,o.assets}function F(){return{x:pageXOffset,y:pageYOffset}}function ne(o){return o.composedPath().find(t=>t instanceof Node&&t.nodeName.toUpperCase()==="A")}function ae(o){return o instanceof SVGAElement?new URL(o.href.baseVal,document.baseURI):new URL(o.href)}class Be{constructor({base:e,routes:t,trailing_slash:r,renderer:l}){var i,a;this.base=e,this.routes=t,this.trailing_slash=r,this.navigating=0,this.renderer=l,l.router=this,this.enabled=!0,document.body.setAttribute("tabindex","-1"),this.current_history_index=(a=(i=history.state)==null?void 0:i["sveltekit:index"])!=null?a:0,this.current_history_index===0&&history.replaceState(W(y({},history.state),{"sveltekit:index":0}),"",location.href),this.callbacks={before_navigate:[],after_navigate:[]}}init_listeners(){"scrollRestoration"in history&&(history.scrollRestoration="manual"),addEventListener("beforeunload",i=>{let a=!1;const s={from:this.renderer.current.url,to:null,cancel:()=>a=!0};this.callbacks.before_navigate.forEach(n=>n(s)),a?(i.preventDefault(),i.returnValue=""):history.scrollRestoration="auto"}),addEventListener("load",()=>{history.scrollRestoration="manual"});let e;addEventListener("scroll",()=>{clearTimeout(e),e=setTimeout(()=>{const i=W(y({},history.state||{}),{"sveltekit:scroll":F()});history.replaceState(i,document.title,window.location.href)},200)});const t=i=>{const a=ne(i);a&&a.href&&a.hasAttribute("sveltekit:prefetch")&&this.prefetch(ae(a))};let r;const l=i=>{clearTimeout(r),r=setTimeout(()=>{var a;(a=i.target)==null||a.dispatchEvent(new CustomEvent("sveltekit:trigger_prefetch",{bubbles:!0}))},20)};addEventListener("touchstart",t),addEventListener("mousemove",l),addEventListener("sveltekit:trigger_prefetch",t),addEventListener("click",i=>{if(!this.enabled||i.button||i.which!==1||i.metaKey||i.ctrlKey||i.shiftKey||i.altKey||i.defaultPrevented)return;const a=ne(i);if(!a||!a.href)return;const s=ae(a);if(s.toString()===location.href){location.hash||i.preventDefault();return}const 
c=(a.getAttribute("rel")||"").split(/\s+/);if(!(a.hasAttribute("download")||c&&c.includes("external"))&&!(a instanceof SVGAElement?a.target.baseVal:a.target)){if(s.href.split("#")[0]===location.href.split("#")[0]){setTimeout(()=>history.pushState({},"",s.href));const f=this.parse(s);return f?this.renderer.update(f,[],!1):void 0}this._navigate({url:s,scroll:a.hasAttribute("sveltekit:noscroll")?F():null,keepfocus:!1,chain:[],details:{state:{},replaceState:!1},accepted:()=>i.preventDefault(),blocked:()=>i.preventDefault()})}}),addEventListener("popstate",i=>{if(i.state&&this.enabled){if(i.state["sveltekit:index"]===this.current_history_index)return;this._navigate({url:new URL(location.href),scroll:i.state["sveltekit:scroll"],keepfocus:!1,chain:[],details:null,accepted:()=>{this.current_history_index=i.state["sveltekit:index"]},blocked:()=>{const a=this.current_history_index-i.state["sveltekit:index"];history.go(a)}})}})}owns(e){return e.origin===location.origin&&e.pathname.startsWith(this.base)}parse(e){if(this.owns(e)){const t=decodeURI(e.pathname.slice(this.base.length)||"/");return{id:e.pathname+e.search,routes:this.routes.filter(([r])=>r.test(t)),url:e,path:t}}}async goto(e,{noscroll:t=!1,replaceState:r=!1,keepfocus:l=!1,state:i={}}={},a){const s=new URL(e,De(document));return this.enabled?this._navigate({url:s,scroll:t?F():null,keepfocus:l,chain:a,details:{state:i,replaceState:r},accepted:()=>{},blocked:()=>{}}):(location.href=s.href,new Promise(()=>{}))}enable(){this.enabled=!0}disable(){this.enabled=!1}async prefetch(e){const t=this.parse(e);if(!t)throw new Error("Attempted to prefetch a URL that does not belong to this app");return this.renderer.load(t)}after_navigate(e){G(()=>(this.callbacks.after_navigate.push(e),()=>{const t=this.callbacks.after_navigate.indexOf(e);this.callbacks.after_navigate.splice(t,1)}))}before_navigate(e){G(()=>(this.callbacks.before_navigate.push(e),()=>{const t=this.callbacks.before_navigate.indexOf(e);this.callbacks.before_navigate.splice(t,1)}))}async _navigate({url:e,scroll:t,keepfocus:r,chain:l,details:i,accepted:a,blocked:s}){const n=this.renderer.current.url;let c=!1;const f={from:n,to:e,cancel:()=>c=!0};if(this.callbacks.before_navigate.forEach(d=>d(f)),c){s();return}const h=this.parse(e);if(!h)return location.href=e.href,new Promise(()=>{});a(),this.navigating||dispatchEvent(new CustomEvent("sveltekit:navigation-start")),this.navigating++;let{pathname:u}=e;if(this.trailing_slash==="never"?u!=="/"&&u.endsWith("/")&&(u=u.slice(0,-1)):this.trailing_slash==="always"&&!e.pathname.split("/").pop().includes(".")&&!u.endsWith("/")&&(u+="/"),h.url=new URL(e.origin+u+e.search+e.hash),i){const d=i.replaceState?0:1;i.state["sveltekit:index"]=this.current_history_index+=d,history[i.replaceState?"replaceState":"pushState"](i.state,"",h.url)}if(await this.renderer.handle_navigation(h,l,!1,{scroll:t,keepfocus:r}),this.navigating--,!this.navigating){dispatchEvent(new CustomEvent("sveltekit:navigation-end"));const d={from:n,to:e};this.callbacks.after_navigate.forEach(_=>_(d))}}}function oe(o){return o instanceof Error||o&&o.name&&o.message?o:new Error(JSON.stringify(o))}function Je(o){let e=5381,t=o.length;if(typeof o=="string")for(;t;)e=e*33^o.charCodeAt(--t);else for(;t;)e=e*33^o[--t];return(e>>>0).toString(36)}function le(o){const e=o.status&&o.status>=400&&o.status<=599&&!o.redirect;if(o.error||e){const t=o.status;if(!o.error&&e)return{status:t||500,error:new Error};const r=typeof o.error=="string"?new Error(o.error):o.error;return r instanceof 
Error?!t||t<400||t>599?(console.warn('"error" returned from load() without a valid status code \u2014 defaulting to 500'),{status:500,error:r}):{status:t,error:r}:{status:500,error:new Error(`"error" property returned from load() must be a string or instance of Error, received type "${typeof r}"`)}}if(o.redirect){if(!o.status||Math.floor(o.status/100)!==3)return{status:500,error:new Error('"redirect" property returned from load() must be accompanied by a 3xx status code')};if(typeof o.redirect!="string")return{status:500,error:new Error('"redirect" property returned from load() must be a string')}}if(o.context)throw new Error('You are returning "context" from a load function. "context" was renamed to "stuff", please adjust your code accordingly.');return o}function ce(o){const e=K(o);let t=!0;function r(){t=!0,e.update(a=>a)}function l(a){t=!1,e.set(a)}function i(a){let s;return e.subscribe(n=>{(s===void 0||t&&n!==s)&&a(s=n)})}return{notify:r,set:l,subscribe:i}}function Ke(){const{set:o,subscribe:e}=K(!1),t="1720790592397";let r;async function l(){clearTimeout(r);const a=await fetch(`${Y}/_app/version.json`,{headers:{pragma:"no-cache","cache-control":"no-cache"}});if(a.ok){const{version:s}=await a.json(),n=s!==t;return n&&(o(!0),clearTimeout(r)),n}else throw new Error(`Version check failed: ${a.status}`)}return{subscribe:e,check:l}}function We(o,e){const t=typeof o=="string"?o:o.url;let r=`script[data-type="svelte-data"][data-url=${JSON.stringify(t)}]`;e&&typeof e.body=="string"&&(r+=`[data-body="${Je(e.body)}"]`);const l=document.querySelector(r);if(l&&l.textContent){const i=JSON.parse(l.textContent),{body:a}=i,s=ee(i,["body"]);return Promise.resolve(new Response(a,s))}return fetch(o,e)}class ze{constructor({Root:e,fallback:t,target:r,session:l}){this.Root=e,this.fallback=t,this.router,this.target=r,this.started=!1,this.session_id=1,this.invalid=new Set,this.invalidating=null,this.autoscroll=!0,this.updating=!1,this.current={url:null,session_id:0,branch:[]},this.cache=new Map,this.loading={id:null,promise:null},this.stores={url:ce({}),page:ce({}),navigating:K(null),session:K(l),updated:Ke()},this.$session=null,this.root=null;let i=!1;this.stores.session.subscribe(async a=>{if(this.$session=a,!i||!this.router)return;this.session_id+=1;const s=this.router.parse(new URL(location.href));s&&this.update(s,[],!0)}),i=!0}disable_scroll_handling(){(this.updating||!this.started)&&(this.autoscroll=!1)}async start({status:e,error:t,nodes:r,url:l,params:i}){const a=[];let s={},n,c;l.hash=window.location.hash;try{for(let f=0;f10||t.includes(e.url.pathname))a=await this._load_error({status:500,error:new Error("Redirect loop"),url:e.url});else{this.router?this.router.goto(new URL(a.redirect,e.url).href,{replaceState:!0},[...t,e.url.pathname]):location.href=new URL(a.redirect,location.href).href;return}else if(((c=(n=a.props)==null?void 0:n.page)==null?void 0:c.status)>=400&&await this.stores.updated.check()){location.href=e.url.href;return}if(this.updating=!0,this.started?(this.current=a.state,this.root.$set(a.props),this.stores.navigating.set(null)):this._init(a),l){const{scroll:h,keepfocus:u}=l;if(u||((f=getSelection())==null||f.removeAllRanges(),document.body.focus()),await te(),this.autoscroll){const d=e.url.hash&&document.getElementById(e.url.hash.slice(1));h?scrollTo(h.x,h.y):d?d.scrollIntoView():scrollTo(0,0)}}else await te();if(this.loading.promise=null,this.loading.id=null,this.autoscroll=!0,this.updating=!1,!this.router)return;const 
s=a.state.branch[a.state.branch.length-1];s&&s.module.router===!1?this.router.disable():this.router.enable()}load(e){return this.loading.promise=this._get_navigation_result(e,!1),this.loading.id=e.id,this.loading.promise}invalidate(e){return this.invalid.add(e),this.invalidating||(this.invalidating=Promise.resolve().then(async()=>{const t=this.router&&this.router.parse(new URL(location.href));t&&await this.update(t,[],!0),this.invalidating=null})),this.invalidating}_init(e){this.current=e.state;const t=document.querySelector("style[data-svelte]");if(t&&t.remove(),this.root=new this.Root({target:this.target,props:y({stores:this.stores},e.props),hydrate:!0}),this.started=!0,this.router){const r={from:null,to:new URL(location.href)};this.router.callbacks.after_navigate.forEach(l=>l(r))}}async _get_navigation_result(e,t){if(this.loading.id===e.id&&this.loading.promise)return this.loading.promise;for(let r=0;rn()),i+=1;else break}const a=await this._load({route:l,info:e},t);if(a)return a}return await this._load_error({status:404,error:new Error(`Not found: ${e.url.pathname}`),url:e.url})}async _get_navigation_result_from_branch({url:e,params:t,stuff:r,branch:l,status:i,error:a}){const s=l.filter(Boolean),n=s.find(u=>u.loaded&&u.loaded.redirect),c={redirect:n&&n.loaded?n.loaded.redirect:void 0,state:{url:e,params:t,branch:l,session_id:this.session_id},props:{components:s.map(u=>u.module.default)}};for(let u=0;u{Object.defineProperty(c.props.page,d,{get:()=>{throw new Error(`$page.${d} has been replaced by $page.url.${_}`)}})};u("origin","origin"),u("path","pathname"),u("query","searchParams")}const f=s[s.length-1],h=f.loaded&&f.loaded.maxage;if(h){const u=e.pathname+e.search;let d=!1;const _=()=>{this.cache.get(u)===c&&this.cache.delete(u),E(),clearTimeout(N)},N=setTimeout(_,h*1e3),E=this.stores.session.subscribe(()=>{d&&_()});d=!0,this.cache.set(u,c)}return c}async _load_node({status:e,error:t,module:r,url:l,params:i,stuff:a,props:s}){const n={module:r,uses:{params:new Set,url:!1,session:!1,stuff:!1,dependencies:new Set},loaded:null,stuff:a};s&&n.uses.dependencies.add(l.href);const c={};for(const h in i)Object.defineProperty(c,h,{get(){return n.uses.params.add(h),i[h]},enumerable:!0});const f=this.$session;if(r.load){const{started:h}=this,u={params:c,props:s||{},get url(){return n.uses.url=!0,l},get session(){return n.uses.session=!0,f},get stuff(){return n.uses.stuff=!0,y({},a)},fetch(_,N){const E=typeof _=="string"?_:_.url,{href:R}=new URL(E,l);return n.uses.dependencies.add(R),h?fetch(_,N):We(_,N)}};t&&(u.status=e,u.error=t);const d=await r.load.call(null,u);if(!d)throw new Error("load function must return a value");n.loaded=le(d),n.loaded.stuff&&(n.stuff=n.loaded.stuff)}else s&&(n.loaded=le({props:s}));return n}async _load({route:e,info:{url:t,path:r}},l){const i=t.pathname+t.search;if(!l){const p=this.cache.get(i);if(p)return p}const[a,s,n,c,f]=e,h=c?c(a.exec(r)):{},u=this.current.url&&{url:i!==this.current.url.pathname+this.current.url.search,params:Object.keys(h).filter(p=>this.current.params[p]!==h[p]),session:this.session_id!==this.current.session_id};let d=[],_={},N=!1,E=200,R;s.forEach(p=>p());e:for(let p=0;pk.uses.params.has(T))||u.session&&k.uses.session||Array.from(k.uses.dependencies).some(T=>this.invalid.has(T))||N&&k.uses.stuff){let T={};if(f&&p===s.length-1){const B=await fetch(`${t.pathname}/__data.json`,{headers:{"x-sveltekit-noredirect":"true"}});if(B.ok){const X=B.headers.get("x-sveltekit-location");if(X)return{redirect:X,props:{},state:this.current};T=await B.json()}else 
E=B.status,R=new Error("Failed to load data")}if(R||(g=await this._load_node({module:m,url:t,params:h,props:T,stuff:_})),g&&g.loaded){if(g.loaded.fallthrough)return;if(g.loaded.error&&(E=g.loaded.status,R=g.loaded.error),g.loaded.redirect)return{redirect:g.loaded.redirect,props:{},state:this.current};g.loaded.stuff&&(N=!0)}}else g=k}catch(m){E=500,R=oe(m)}if(R){for(;p--;)if(n[p]){let m,k,q=p;for(;!(k=d[q]);)q-=1;try{if(m=await this._load_node({status:E,error:R,module:await n[p](),url:t,params:h,stuff:k.stuff}),m&&m.loaded&&m.loaded.error)continue;m&&m.loaded&&m.loaded.stuff&&(_=y(y({},_),m.loaded.stuff)),d=d.slice(0,q+1).concat(m);break e}catch{continue}}return await this._load_error({status:E,error:R,url:t})}else g&&g.loaded&&g.loaded.stuff&&(_=y(y({},_),g.loaded.stuff)),d.push(g)}return await this._get_navigation_result_from_branch({url:t,params:h,stuff:_,branch:d,status:E,error:R})}async _load_error({status:e,error:t,url:r}){var c,f;const l={},i=await this._load_node({module:await this.fallback[0],url:r,params:l,stuff:{}}),a=await this._load_node({status:e,error:t,module:await this.fallback[1],url:r,params:l,stuff:i&&i.loaded&&i.loaded.stuff||{}}),s=[i,a],n=y(y({},(c=i==null?void 0:i.loaded)==null?void 0:c.stuff),(f=a==null?void 0:a.loaded)==null?void 0:f.stuff);return await this._get_navigation_result_from_branch({url:r,params:l,stuff:n,branch:s,status:e,error:t})}}async function Ye({paths:o,target:e,session:t,route:r,spa:l,trailing_slash:i,hydrate:a}){const s=new ze({Root:Oe,fallback:Ie,target:e,session:t}),n=r?new Be({base:o.base,routes:Ve,trailing_slash:i,renderer:s}):null;qe(o),a&&await s.start(a),n&&(l&&n.goto(location.href,{replaceState:!0},[]),n.init_listeners()),dispatchEvent(new CustomEvent("sveltekit:start"))}export{Ye as start}; diff --git a/_app/version.json b/_app/version.json new file mode 100644 index 0000000..4993142 --- /dev/null +++ b/_app/version.json @@ -0,0 +1 @@ +{"version":"1720790592397"} \ No newline at end of file diff --git a/dev/CNAME b/dev/CNAME new file mode 100644 index 0000000..2757fda --- /dev/null +++ b/dev/CNAME @@ -0,0 +1 @@ +uc-ebook.org \ No newline at end of file diff --git a/dev/_app/assets/pages/__layout.svelte-c9f861a9.css b/dev/_app/assets/pages/__layout.svelte-c9f861a9.css new file mode 100644 index 0000000..2af3d3b --- /dev/null +++ b/dev/_app/assets/pages/__layout.svelte-c9f861a9.css @@ -0,0 +1 @@ +*,:before,:after{box-sizing:border-box;border-width:0;border-style:solid;border-color:#e5e7eb}:before,:after{--tw-content: ""}html{line-height:1.5;-webkit-text-size-adjust:100%;-moz-tab-size:4;-o-tab-size:4;tab-size:4;font-family:ui-sans-serif,system-ui,-apple-system,BlinkMacSystemFont,Segoe UI,Roboto,Helvetica Neue,Arial,Noto Sans,sans-serif,"Apple Color Emoji","Segoe UI Emoji",Segoe UI Symbol,"Noto Color Emoji"}body{margin:0;line-height:inherit}hr{height:0;color:inherit;border-top-width:1px}abbr:where([title]){-webkit-text-decoration:underline dotted;text-decoration:underline dotted}h1,h2,h3,h4,h5,h6{font-size:inherit;font-weight:inherit}a{color:inherit;text-decoration:inherit}b,strong{font-weight:bolder}code,kbd,samp,pre{font-family:ui-monospace,SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier 
New,monospace;font-size:1em}small{font-size:80%}sub,sup{font-size:75%;line-height:0;position:relative;vertical-align:baseline}sub{bottom:-.25em}sup{top:-.5em}table{text-indent:0;border-color:inherit;border-collapse:collapse}button,input,optgroup,select,textarea{font-family:inherit;font-size:100%;line-height:inherit;color:inherit;margin:0;padding:0}button,select{text-transform:none}button,[type=button],[type=reset],[type=submit]{-webkit-appearance:button;background-color:transparent;background-image:none}:-moz-focusring{outline:auto}:-moz-ui-invalid{box-shadow:none}progress{vertical-align:baseline}::-webkit-inner-spin-button,::-webkit-outer-spin-button{height:auto}[type=search]{-webkit-appearance:textfield;outline-offset:-2px}::-webkit-search-decoration{-webkit-appearance:none}::-webkit-file-upload-button{-webkit-appearance:button;font:inherit}summary{display:list-item}blockquote,dl,dd,h1,h2,h3,h4,h5,h6,hr,figure,p,pre{margin:0}fieldset{margin:0;padding:0}legend{padding:0}ol,ul,menu{list-style:none;margin:0;padding:0}textarea{resize:vertical}input::-moz-placeholder,textarea::-moz-placeholder{opacity:1;color:#9ca3af}input:-ms-input-placeholder,textarea:-ms-input-placeholder{opacity:1;color:#9ca3af}input::placeholder,textarea::placeholder{opacity:1;color:#9ca3af}button,[role=button]{cursor:pointer}:disabled{cursor:default}img,svg,video,canvas,audio,iframe,embed,object{display:block;vertical-align:middle}img,video{max-width:100%;height:auto}[hidden]{display:none}*,:before,:after{--tw-translate-x: 0;--tw-translate-y: 0;--tw-rotate: 0;--tw-skew-x: 0;--tw-skew-y: 0;--tw-scale-x: 1;--tw-scale-y: 1;--tw-pan-x: ;--tw-pan-y: ;--tw-pinch-zoom: ;--tw-scroll-snap-strictness: proximity;--tw-ordinal: ;--tw-slashed-zero: ;--tw-numeric-figure: ;--tw-numeric-spacing: ;--tw-numeric-fraction: ;--tw-ring-inset: ;--tw-ring-offset-width: 0px;--tw-ring-offset-color: #fff;--tw-ring-color: rgb(59 130 246 / .5);--tw-ring-offset-shadow: 0 0 #0000;--tw-ring-shadow: 0 0 #0000;--tw-shadow: 0 0 #0000;--tw-shadow-colored: 0 0 #0000;--tw-blur: ;--tw-brightness: ;--tw-contrast: ;--tw-grayscale: ;--tw-hue-rotate: ;--tw-invert: ;--tw-saturate: ;--tw-sepia: ;--tw-drop-shadow: ;--tw-backdrop-blur: ;--tw-backdrop-brightness: ;--tw-backdrop-contrast: ;--tw-backdrop-grayscale: ;--tw-backdrop-hue-rotate: ;--tw-backdrop-invert: ;--tw-backdrop-opacity: ;--tw-backdrop-saturate: ;--tw-backdrop-sepia: 
}.m-4{margin:1rem}.mx-6{margin-left:1.5rem;margin-right:1.5rem}.my-6{margin-top:1.5rem;margin-bottom:1.5rem}.my-3{margin-top:.75rem;margin-bottom:.75rem}.mx-3{margin-left:.75rem;margin-right:.75rem}.mb-4{margin-bottom:1rem}.mt-6{margin-top:1.5rem}.mb-12{margin-bottom:3rem}.mb-3{margin-bottom:.75rem}.mt-3{margin-top:.75rem}.flex{display:flex}.hidden{display:none}.h-16{height:4rem}.h-20{height:5rem}.h-36{height:9rem}.w-full{width:100%}.w-80{width:20rem}.w-6{width:1.5rem}.w-64{width:16rem}.w-20{width:5rem}.max-w-screen-xl{max-width:1280px}.max-w-xs{max-width:20rem}.max-w-4xl{max-width:56rem}.flex-none{flex:none}.flex-row{flex-direction:row}.flex-col{flex-direction:column}.flex-wrap{flex-wrap:wrap}.items-start{align-items:flex-start}.items-center{align-items:center}.items-stretch{align-items:stretch}.justify-start{justify-content:flex-start}.justify-center{justify-content:center}.justify-around{justify-content:space-around}.overflow-auto{overflow:auto}.rounded-md{border-radius:.375rem}.rounded-lg{border-radius:.5rem}.rounded-l-md{border-top-left-radius:.375rem;border-bottom-left-radius:.375rem}.rounded-r-md{border-top-right-radius:.375rem;border-bottom-right-radius:.375rem}.rounded-br-md{border-bottom-right-radius:.375rem}.border-y-4{border-top-width:4px;border-bottom-width:4px}.border-double{border-style:double}.border-slate-50{--tw-border-opacity: 1;border-color:rgb(248 250 252 / var(--tw-border-opacity))}.bg-slate-900{--tw-bg-opacity: 1;background-color:rgb(15 23 42 / var(--tw-bg-opacity))}.bg-slate-50{--tw-bg-opacity: 1;background-color:rgb(248 250 252 / var(--tw-bg-opacity))}.bg-sky-700{--tw-bg-opacity: 1;background-color:rgb(3 105 161 / var(--tw-bg-opacity))}.bg-no-repeat{background-repeat:no-repeat}.object-contain{-o-object-fit:contain;object-fit:contain}.p-2{padding:.5rem}.py-6{padding-top:1.5rem;padding-bottom:1.5rem}.px-3{padding-left:.75rem;padding-right:.75rem}.py-12{padding-top:3rem;padding-bottom:3rem}.pl-3{padding-left:.75rem}.pb-1{padding-bottom:.25rem}.pt-1{padding-top:.25rem}.pb-6{padding-bottom:1.5rem}.pt-3{padding-top:.75rem}.pb-12{padding-bottom:3rem}.text-left{text-align:left}.text-center{text-align:center}.font-\[\'ornaments\'\]{font-family:ornaments}.text-sm{font-size:.875rem;line-height:1.25rem}.text-3xl{font-size:1.875rem;line-height:2.25rem}.text-xl{font-size:1.25rem;line-height:1.75rem}.text-4xl{font-size:2.25rem;line-height:2.5rem}.text-2xl{font-size:1.5rem;line-height:2rem}.text-lg{font-size:1.125rem;line-height:1.75rem}.font-bold{font-weight:700}.italic{font-style:italic}.leading-tight{line-height:1.25}.text-slate-900{--tw-text-opacity: 1;color:rgb(15 23 42 / var(--tw-text-opacity))}.text-slate-50{--tw-text-opacity: 1;color:rgb(248 250 252 / var(--tw-text-opacity))}.text-slate-500{--tw-text-opacity: 1;color:rgb(100 116 139 / var(--tw-text-opacity))}.text-slate-400{--tw-text-opacity: 1;color:rgb(148 163 184 / var(--tw-text-opacity))}.shadow-md{--tw-shadow: 0 4px 6px -1px rgb(0 0 0 / .1), 0 2px 4px -2px rgb(0 0 0 / .1);--tw-shadow-colored: 0 4px 6px -1px var(--tw-shadow-color), 0 2px 4px -2px var(--tw-shadow-color);box-shadow:var(--tw-ring-offset-shadow, 0 0 #0000),var(--tw-ring-shadow, 0 0 #0000),var(--tw-shadow)}.drop-shadow{--tw-drop-shadow: drop-shadow(0 1px 2px rgb(0 0 0 / .1)) drop-shadow(0 1px 1px rgb(0 0 0 / .06));filter:var(--tw-blur) var(--tw-brightness) var(--tw-contrast) var(--tw-grayscale) var(--tw-hue-rotate) var(--tw-invert) var(--tw-saturate) var(--tw-sepia) var(--tw-drop-shadow)}.backdrop-blur-sm{--tw-backdrop-blur: 
blur(4px);-webkit-backdrop-filter:var(--tw-backdrop-blur) var(--tw-backdrop-brightness) var(--tw-backdrop-contrast) var(--tw-backdrop-grayscale) var(--tw-backdrop-hue-rotate) var(--tw-backdrop-invert) var(--tw-backdrop-opacity) var(--tw-backdrop-saturate) var(--tw-backdrop-sepia);backdrop-filter:var(--tw-backdrop-blur) var(--tw-backdrop-brightness) var(--tw-backdrop-contrast) var(--tw-backdrop-grayscale) var(--tw-backdrop-hue-rotate) var(--tw-backdrop-invert) var(--tw-backdrop-opacity) var(--tw-backdrop-saturate) var(--tw-backdrop-sepia)}@font-face{font-family:ornaments;src:url(/fonts/nymphette-webfont.woff) format("woff"),url(/fonts/nymphette-webfont.woff2) format("woff"),url(fonts/nymphette-webfont.woff) format("woff"),url(fonts/nymphette-webfont.woff2) format("woff")}.hover\:cursor-pointer:hover{cursor:pointer}.hover\:bg-sky-500:hover{--tw-bg-opacity: 1;background-color:rgb(14 165 233 / var(--tw-bg-opacity))}.hover\:text-sky-700:hover{--tw-text-opacity: 1;color:rgb(3 105 161 / var(--tw-text-opacity))}.active\:bg-orange-400:active{--tw-bg-opacity: 1;background-color:rgb(251 146 60 / var(--tw-bg-opacity))}.group:hover .group-hover\:text-sky-500{--tw-text-opacity: 1;color:rgb(14 165 233 / var(--tw-text-opacity))}@media (min-width: 768px){.md\:block{display:block}.md\:w-96{width:24rem}}@media (min-width: 1024px){.lg\:mr-6{margin-right:1.5rem}.lg\:mb-0{margin-bottom:0}.lg\:mt-6{margin-top:1.5rem}.lg\:flex-row{flex-direction:row}.lg\:items-start{align-items:flex-start}.lg\:pl-56{padding-left:14rem}.lg\:pb-6{padding-bottom:1.5rem}.lg\:text-left{text-align:left}}@media (min-width: 1280px){.xl\:rounded-md{border-radius:.375rem}} diff --git a/dev/_app/assets/pages/index.svelte-7f4057b4.css b/dev/_app/assets/pages/index.svelte-7f4057b4.css new file mode 100644 index 0000000..3bfc066 --- /dev/null +++ b/dev/_app/assets/pages/index.svelte-7f4057b4.css @@ -0,0 +1 @@ +.icon path{stroke-width:1}.text-shadow.svelte-uz1u3c{text-shadow:1px 1px 1px rgba(33,33,33,.5)} diff --git a/dev/_app/chunks/vendor-80631b75.js b/dev/_app/chunks/vendor-80631b75.js new file mode 100644 index 0000000..ce488f8 --- /dev/null +++ b/dev/_app/chunks/vendor-80631b75.js @@ -0,0 +1 @@ +function j(){}function w(e,t){for(const n in t)e[n]=t[n];return e}function Me(e){return e()}function ze(){return Object.create(null)}function I(e){e.forEach(Me)}function Ue(e){return typeof e=="function"}function Ee(e,t){return e!=e?t==t:e!==t||e&&typeof e=="object"||typeof e=="function"}let W;function mt(e,t){return W||(W=document.createElement("a")),W.href=t,e===W.href}function Xe(e){return Object.keys(e).length===0}function gt(e,t,n,i){if(e){const l=je(e,t,n,i);return e[0](l)}}function je(e,t,n,i){return e[1]&&i?w(n.ctx.slice(),e[1](i(t))):n.ctx}function pt(e,t,n,i){if(e[2]&&i){const l=e[2](i(n));if(t.dirty===void 0)return l;if(typeof l=="object"){const s=[],f=Math.max(t.dirty.length,l.length);for(let u=0;u32){const t=[],n=e.ctx.length/32;for(let i=0;i>1);n(l)<=i?e=l+1:t=l}return e}function tt(e){if(e.hydrate_init)return;e.hydrate_init=!0;let t=e.childNodes;if(e.nodeName==="HEAD"){const c=[];for(let a=0;a0&&t[n[l]].claim_order<=a?l+1:et(1,l,m=>t[n[m]].claim_order,a))-1;i[c]=n[_]+1;const p=_+1;n[p]=c,l=Math.max(p,l)}const s=[],f=[];let u=t.length-1;for(let c=n[l]+1;c!=0;c=i[c-1]){for(s.push(t[c-1]);u>=c;u--)f.push(t[u]);u--}for(;u>=0;u--)f.push(t[u]);s.reverse(),f.sort((c,a)=>c.claim_order-a.claim_order);for(let c=0,a=0;c=s[a].claim_order;)a++;const _=ae.removeEventListener(t,n,i)}function 
lt(e,t,n){n==null?e.removeAttribute(t):e.getAttribute(t)!==n&&e.setAttribute(t,n)}function x(e,t){for(const n in t)lt(e,n,t[n])}function E(e){return Array.from(e.childNodes)}function it(e){e.claim_info===void 0&&(e.claim_info={last_index:0,total_claimed:0})}function Be(e,t,n,i,l=!1){it(e);const s=(()=>{for(let f=e.claim_info.last_index;f=0;f--){const u=e[f];if(t(u)){const c=n(u);return c===void 0?e.splice(f,1):e[f]=c,l?c===void 0&&e.claim_info.last_index--:e.claim_info.last_index=f,u}}return i()})();return s.claim_order=e.claim_info.total_claimed,e.claim_info.total_claimed+=1,s}function Se(e,t,n,i){return Be(e,l=>l.nodeName===t,l=>{const s=[];for(let f=0;fl.removeAttribute(f))},()=>i(t))}function xt(e,t,n){return Se(e,t,n,nt)}function B(e,t,n){return Se(e,t,n,z)}function ot(e,t){return Be(e,n=>n.nodeType===3,n=>{const i=""+t;if(n.data.startsWith(i)){if(n.data.length!==i.length)return n.splitText(i.length)}else n.data=i},()=>U(t),!0)}function wt(e){return ot(e," ")}function Ct(e,t){t=""+t,e.wholeText!==t&&(e.data=t)}function Mt(e,t,n,i){n===null?e.style.removeProperty(t):e.style.setProperty(t,n,i?"important":"")}function zt(e,t){for(let n=0;n{Q.delete(e),i&&(n&&e.d(1),i())}),e.o(t)}}function N(e,t){const n={},i={},l={$$scope:1};let s=e.length;for(;s--;){const f=e[s],u=t[s];if(u){for(const c in f)c in u||(i[c]=1);for(const c in u)l[c]||(n[c]=u[c],l[c]=1);e[s]=u}else for(const c in f)l[c]=1}for(const f in i)f in n||(n[f]=void 0);return n}function Vt(e){return typeof e=="object"&&e!==null?e:{}}function qt(e){e&&e.c()}function Tt(e,t){e&&e.l(t)}function at(e,t,n,i){const{fragment:l,on_mount:s,on_destroy:f,after_update:u}=e.$$;l&&l.m(t,n),i||Z(()=>{const c=s.map(Me).filter(Ue);f?f.push(...c):I(c),e.$$.on_mount=[]}),u.forEach(Z)}function st(e,t){const n=e.$$;n.fragment!==null&&(I(n.on_destroy),n.fragment&&n.fragment.d(t),n.on_destroy=n.fragment=null,n.ctx=[])}function ut(e,t){e.$$.dirty[0]===-1&&(D.push(e),Ve(),e.$$.dirty.fill(0)),e.$$.dirty[t/31|0]|=1<{const g=M.length?M[0]:m;return a.ctx&&l(a.ctx[p],a.ctx[p]=g)&&(!a.skip_bound&&a.bound[p]&&a.bound[p](g),_&&ut(e,p)),m}):[],a.update(),_=!0,I(a.before_update),a.fragment=i?i(a.ctx):!1,t.target){if(t.hydrate){Ze();const p=E(t.target);a.fragment&&a.fragment.l(p),p.forEach(b)}else a.fragment&&a.fragment.c();t.intro&&ct(e.$$.fragment),at(e,t.target,t.anchor,t.customElement),$e(),qe()}P(c)}class ht{$destroy(){st(this,1),this.$destroy=j}$on(t,n){const i=this.$$.callbacks[t]||(this.$$.callbacks[t]=[]);return i.push(n),()=>{const l=i.indexOf(n);l!==-1&&i.splice(l,1)}}$set(t){this.$$set&&!Xe(t)&&(this.$$.skip_bound=!0,this.$$set(t),this.$$.skip_bound=!1)}}const T=[];function It(e,t=j){let n;const i=new Set;function l(u){if(Ee(e,u)&&(e=u,n)){const c=!T.length;for(const a of i)a[1](),T.push(a,e);if(c){for(let a=0;a{i.delete(a),i.size===0&&(n(),n=null)}}return{set:l,update:s,subscribe:f}}function Te(e,t,n){const i=e.slice();return i[5]=t[n],i}function Ie(e,t,n){const i=e.slice();return i[5]=t[n],i}function Oe(e,t,n){const i=e.slice();return i[5]=t[n],i}function Pe(e,t,n){const i=e.slice();return i[5]=t[n],i}function De(e,t,n){const i=e.slice();return i[5]=t[n],i}function Fe(e,t,n){const i=e.slice();return i[5]=t[n],i}function We(e){let t,n=[e[5]],i={};for(let l=0;l{t=w(w({},t),Ye(a)),n(2,s=Ae(t,l)),"src"in a&&n(3,f=a.src),"size"in a&&n(0,u=a.size),"theme"in a&&n(4,c=a.theme)},e.$$.update=()=>{var a;e.$$.dirty&24&&n(1,i=(a=f==null?void 0:f[c])!=null?a:f==null?void 0:f.default)},[u,i,s,f,c]}class Ot extends 
ht{constructor(t){super();ft(this,t,dt,_t,Ee,{src:3,size:0,theme:4})}}const Pt={default:{a:{fill:"none",viewBox:"0 0 24 24",stroke:"currentColor","aria-hidden":"true"},path:[{"stroke-linecap":"round","stroke-linejoin":"round","stroke-width":"2",d:"M12 6.253v13m0-13C10.832 5.477 9.246 5 7.5 5S4.168 5.477 3 6.253v13C4.168 18.477 5.754 18 7.5 18s3.332.477 4.5 1.253m0-13C13.168 5.477 14.754 5 16.5 5c1.747 0 3.332.477 4.5 1.253v13C19.832 18.477 18.247 18 16.5 18c-1.746 0-3.332.477-4.5 1.253"}]},solid:{a:{viewBox:"0 0 20 20",fill:"currentColor","aria-hidden":"true"},path:[{d:"M9 4.804A7.968 7.968 0 005.5 4c-1.255 0-2.443.29-3.5.804v10A7.969 7.969 0 015.5 14c1.669 0 3.218.51 4.5 1.385A7.962 7.962 0 0114.5 14c1.255 0 2.443.29 3.5.804v-10A7.968 7.968 0 0014.5 4c-1.255 0-2.443.29-3.5.804V12a1 1 0 11-2 0V4.804z"}]}},Dt={default:{a:{fill:"none",viewBox:"0 0 24 24",stroke:"currentColor","aria-hidden":"true"},path:[{"stroke-linecap":"round","stroke-linejoin":"round","stroke-width":"2",d:"M8 5H6a2 2 0 00-2 2v12a2 2 0 002 2h10a2 2 0 002-2v-1M8 5a2 2 0 002 2h2a2 2 0 002-2M8 5a2 2 0 012-2h2a2 2 0 012 2m0 0h2a2 2 0 012 2v3m2 4H10m0 0l3-3m-3 3l3 3"}]},solid:{a:{viewBox:"0 0 20 20",fill:"currentColor","aria-hidden":"true"},path:[{d:"M8 2a1 1 0 000 2h2a1 1 0 100-2H8z"},{d:"M3 5a2 2 0 012-2 3 3 0 003 3h2a3 3 0 003-3 2 2 0 012 2v6h-4.586l1.293-1.293a1 1 0 00-1.414-1.414l-3 3a1 1 0 000 1.414l3 3a1 1 0 001.414-1.414L10.414 13H15v3a2 2 0 01-2 2H5a2 2 0 01-2-2V5zM15 11h2a1 1 0 110 2h-2v-2z"}]}},Ft={default:{a:{fill:"none",viewBox:"0 0 24 24",stroke:"currentColor","aria-hidden":"true"},path:[{"stroke-linecap":"round","stroke-linejoin":"round","stroke-width":"2",d:"M10 20l4-16m4 4l4 4-4 4M6 16l-4-4 4-4"}]},solid:{a:{viewBox:"0 0 20 20",fill:"currentColor","aria-hidden":"true"},path:[{"fill-rule":"evenodd",d:"M12.316 3.051a1 1 0 01.633 1.265l-4 12a1 1 0 11-1.898-.632l4-12a1 1 0 011.265-.633zM5.707 6.293a1 1 0 010 1.414L3.414 10l2.293 2.293a1 1 0 11-1.414 1.414l-3-3a1 1 0 010-1.414l3-3a1 1 0 011.414 0zm8.586 0a1 1 0 011.414 0l3 3a1 1 0 010 1.414l-3 3a1 1 0 11-1.414-1.414L16.586 10l-2.293-2.293a1 1 0 010-1.414z","clip-rule":"evenodd"}]}},Wt={default:{a:{fill:"none",viewBox:"0 0 24 24",stroke:"currentColor","aria-hidden":"true"},path:[{"stroke-linecap":"round","stroke-linejoin":"round","stroke-width":"2",d:"M12 10v6m0 0l-3-3m3 3l3-3m2 8H7a2 2 0 01-2-2V5a2 2 0 012-2h5.586a1 1 0 01.707.293l5.414 5.414a1 1 0 01.293.707V19a2 2 0 01-2 2z"}]},solid:{a:{viewBox:"0 0 20 20",fill:"currentColor","aria-hidden":"true"},path:[{"fill-rule":"evenodd",d:"M6 2a2 2 0 00-2 2v12a2 2 0 002 2h8a2 2 0 002-2V7.414A2 2 0 0015.414 6L12 2.586A2 2 0 0010.586 2H6zm5 6a1 1 0 10-2 0v3.586l-1.293-1.293a1 1 0 10-1.414 1.414l3 3a1 1 0 001.414 0l3-3a1 1 0 00-1.414-1.414L11 11.586V8z","clip-rule":"evenodd"}]}},Gt={default:{a:{viewBox:"0 0 24 24",fill:"none",stroke:"currentColor","stroke-width":"2","stroke-linecap":"round","stroke-linejoin":"round"},path:[{d:"M18 2h-3a5 5 0 0 0-5 5v3H7v4h3v8h4v-8h3l1-4h-4V7a1 1 0 0 1 1-1h3z"}]}},Jt={default:{a:{viewBox:"0 0 24 24",fill:"none",stroke:"currentColor","stroke-width":"2","stroke-linecap":"round","stroke-linejoin":"round"},path:[{d:"M16 8a6 6 0 0 1 6 6v7h-4v-7a2 2 0 0 0-2-2 2 2 0 0 0-2 2v7h-4v-7a6 6 0 0 1 6-6z"}],rect:[{x:"2",y:"9",width:"4",height:"12"}],circle:[{cx:"4",cy:"4",r:"2"}]}},Kt={default:{a:{viewBox:"0 0 24 24",fill:"none",stroke:"currentColor","stroke-width":"2","stroke-linecap":"round","stroke-linejoin":"round"},path:[{d:"M23 3a10.9 10.9 0 0 1-3.14 1.53 4.48 4.48 0 0 0-7.86 3v1A10.66 10.66 0 0 1 3 
4s-4 9 5 13a11.64 11.64 0 0 1-7 2c9 5 20 0 20-11.5a4.5 4.5 0 0 0-.08-.83A7.72 7.72 0 0 0 23 3z"}]}};export{Vt as A,st as B,w as C,It as D,St as E,gt as F,vt as G,yt as H,pt as I,V as J,j as K,Ot as L,Dt as M,Z as N,zt as O,bt as P,q as Q,I as R,ht as S,Et as T,mt as U,Kt as V,Gt as W,Jt as X,Pt as Y,Wt as Z,Ft as _,E as a,lt as b,xt as c,b as d,nt as e,Mt as f,A as g,ot as h,ft as i,Ct as j,kt as k,C as l,wt as m,Nt as n,Lt as o,Ht as p,ct as q,Bt as r,Ee as s,U as t,At as u,jt as v,qt as w,Tt as x,at as y,N as z}; diff --git a/dev/_app/error.svelte-bcbf8e93.js b/dev/_app/error.svelte-bcbf8e93.js new file mode 100644 index 0000000..4f0f8d4 --- /dev/null +++ b/dev/_app/error.svelte-bcbf8e93.js @@ -0,0 +1 @@ +import{S as K,i as w,s as y,e as v,t as E,c as d,a as b,h as P,d as m,g as n,J as R,j,k as N,l as q,m as S,K as C}from"./chunks/vendor-80631b75.js";function H(r){let l,t=r[1].frame+"",a;return{c(){l=v("pre"),a=E(t)},l(f){l=d(f,"PRE",{});var s=b(l);a=P(s,t),s.forEach(m)},m(f,s){n(f,l,s),R(l,a)},p(f,s){s&2&&t!==(t=f[1].frame+"")&&j(a,t)},d(f){f&&m(l)}}}function J(r){let l,t=r[1].stack+"",a;return{c(){l=v("pre"),a=E(t)},l(f){l=d(f,"PRE",{});var s=b(l);a=P(s,t),s.forEach(m)},m(f,s){n(f,l,s),R(l,a)},p(f,s){s&2&&t!==(t=f[1].stack+"")&&j(a,t)},d(f){f&&m(l)}}}function z(r){let l,t,a,f,s=r[1].message+"",c,k,u,p,i=r[1].frame&&H(r),o=r[1].stack&&J(r);return{c(){l=v("h1"),t=E(r[0]),a=N(),f=v("pre"),c=E(s),k=N(),i&&i.c(),u=N(),o&&o.c(),p=q()},l(e){l=d(e,"H1",{});var _=b(l);t=P(_,r[0]),_.forEach(m),a=S(e),f=d(e,"PRE",{});var h=b(f);c=P(h,s),h.forEach(m),k=S(e),i&&i.l(e),u=S(e),o&&o.l(e),p=q()},m(e,_){n(e,l,_),R(l,t),n(e,a,_),n(e,f,_),R(f,c),n(e,k,_),i&&i.m(e,_),n(e,u,_),o&&o.m(e,_),n(e,p,_)},p(e,[_]){_&1&&j(t,e[0]),_&2&&s!==(s=e[1].message+"")&&j(c,s),e[1].frame?i?i.p(e,_):(i=H(e),i.c(),i.m(u.parentNode,u)):i&&(i.d(1),i=null),e[1].stack?o?o.p(e,_):(o=J(e),o.c(),o.m(p.parentNode,p)):o&&(o.d(1),o=null)},i:C,o:C,d(e){e&&m(l),e&&m(a),e&&m(f),e&&m(k),i&&i.d(e),e&&m(u),o&&o.d(e),e&&m(p)}}}function D({error:r,status:l}){return{props:{error:r,status:l}}}function A(r,l,t){let{status:a}=l,{error:f}=l;return r.$$set=s=>{"status"in s&&t(0,a=s.status),"error"in s&&t(1,f=s.error)},[a,f]}class F extends K{constructor(l){super();w(this,l,A,z,y,{status:0,error:1})}}export{F as default,D as load}; diff --git a/dev/_app/manifest.json b/dev/_app/manifest.json new file mode 100644 index 0000000..1c2a870 --- /dev/null +++ b/dev/_app/manifest.json @@ -0,0 +1,51 @@ +{ + ".svelte-kit/runtime/client/start.js": { + "file": "start-1815c519.js", + "src": ".svelte-kit/runtime/client/start.js", + "isEntry": true, + "imports": [ + "_vendor-80631b75.js" + ], + "dynamicImports": [ + "src/routes/__layout.svelte", + ".svelte-kit/runtime/components/error.svelte", + "src/routes/index.svelte" + ] + }, + "src/routes/__layout.svelte": { + "file": "pages/__layout.svelte-3f8aeddf.js", + "src": "src/routes/__layout.svelte", + "isEntry": true, + "isDynamicEntry": true, + "imports": [ + "_vendor-80631b75.js" + ], + "css": [ + "assets/pages/__layout.svelte-c9f861a9.css" + ] + }, + ".svelte-kit/runtime/components/error.svelte": { + "file": "error.svelte-bcbf8e93.js", + "src": ".svelte-kit/runtime/components/error.svelte", + "isEntry": true, + "isDynamicEntry": true, + "imports": [ + "_vendor-80631b75.js" + ] + }, + "src/routes/index.svelte": { + "file": "pages/index.svelte-de143cc1.js", + "src": "src/routes/index.svelte", + "isEntry": true, + "isDynamicEntry": true, + "imports": [ + "_vendor-80631b75.js" + ], + "css": [ + 
"assets/pages/index.svelte-7f4057b4.css" + ] + }, + "_vendor-80631b75.js": { + "file": "chunks/vendor-80631b75.js" + } +} \ No newline at end of file diff --git a/dev/_app/pages/__layout.svelte-3f8aeddf.js b/dev/_app/pages/__layout.svelte-3f8aeddf.js new file mode 100644 index 0000000..13ceb37 --- /dev/null +++ b/dev/_app/pages/__layout.svelte-3f8aeddf.js @@ -0,0 +1 @@ +import{S as l,i,s as r,F as u,G as f,H as _,I as c,q as p,o as d}from"../chunks/vendor-80631b75.js";function m(n){let s;const o=n[1].default,e=u(o,n,n[0],null);return{c(){e&&e.c()},l(t){e&&e.l(t)},m(t,a){e&&e.m(t,a),s=!0},p(t,[a]){e&&e.p&&(!s||a&1)&&f(e,o,t,t[0],s?c(o,t[0],a,null):_(t[0]),null)},i(t){s||(p(e,t),s=!0)},o(t){d(e,t),s=!1},d(t){e&&e.d(t)}}}function $(n,s,o){let{$$slots:e={},$$scope:t}=s;return n.$$set=a=>{"$$scope"in a&&o(0,t=a.$$scope)},[t,e]}class h extends l{constructor(s){super();i(this,s,$,m,r,{})}}export{h as default}; diff --git a/dev/_app/pages/index.svelte-de143cc1.js b/dev/_app/pages/index.svelte-de143cc1.js new file mode 100644 index 0000000..01fd3e4 --- /dev/null +++ b/dev/_app/pages/index.svelte-de143cc1.js @@ -0,0 +1,20 @@ +import{S as Bt,i as Kt,s as Ct,e as r,t as y,c as i,a as o,h as b,d as s,g as B,J as e,K as ft,L as Ae,M as Nt,k as w,w as Ve,m as k,x as Se,N as Zt,b as a,f as pe,O as Ot,y as Me,P as Pt,j as Xt,q as N,o as Y,Q as ht,B as Te,R as Qt,T as Wt,U as Ut,V as Yt,W as el,X as tl,p as ll,v as sl,n as al,Y as rl,Z as il,_ as nl}from"../chunks/vendor-80631b75.js";function Gt(d,l,c){const n=d.slice();return n[0]=l[c],n}function Ht(d){let l,c=d[0]+"",n,p;return{c(){l=r("option"),n=y(c),this.h()},l(m){l=i(m,"OPTION",{});var u=o(l);n=b(u,c),u.forEach(s),this.h()},h(){l.__value=p=d[0],l.value=l.__value},m(m,u){B(m,l,u),e(l,n)},p:ft,d(m){m&&s(l)}}}function ol(d){let l,c,n,p,m,u,g,E=(d[1][d[0]]||"")+"",R,T,M,I,j,_,$,S=Object.keys(d[1]),x=[];for(let h=0;hd[3].call(n)),a(c,"class","mb-4"),a(g,"class","m-4 text-sm text-slate-900 w-80 md:w-96 text-left leading-tight "),pe(u,"box-shadow","inset 0 2px 4px 0 rgb(0 0 0 / 0.2)"),a(u,"class","bg-slate-50 rounded-l-md overflow-auto"),a(M,"title","Copy to clipboard"),a(M,"class","rounded-r-md drop-shadow p-2 bg-sky-700 hover:bg-sky-500 hover:cursor-pointer active:bg-orange-400"),a(m,"class","flex flex-row items-stretch justify-center"),a(l,"class","flex flex-col items-center justify-center")},m(h,A){B(h,l,A),e(l,c),e(c,n);for(let D=0;D{try{navigator.clipboard.writeText(p[n])}catch{}};function u(){n=Wt(this),c(0,n),c(1,p)}return[n,p,m,u]}class dl extends Bt{constructor(l){super();Kt(this,l,cl,ol,Ct,{})}}function Lt(d,l,c){const n=d.slice();return n[6]=l[c],n[8]=c,n}function zt(d,l,c){const n=d.slice();return n[9]=l[c],n}function Ft(d){let l,c,n,p,m=d[9].title+"",u,g,E,R=d[9].text+"",T,M,I,j;return c=new Ae({props:{src:d[9].icon,class:"icon w-20 text-slate-50 group-hover:text-sky-500"}}),{c(){l=r("a"),Ve(c.$$.fragment),n=w(),p=r("div"),u=y(m),g=w(),E=r("div"),T=y(R),M=w(),this.h()},l(_){l=i(_,"A",{target:!0,href:!0,class:!0});var $=o(l);Se(c.$$.fragment,$),n=k($),p=i($,"DIV",{class:!0});var S=o(p);u=b(S,m),S.forEach(s),g=k($),E=i($,"DIV",{class:!0});var x=o(E);T=b(x,R),x.forEach(s),M=k($),$.forEach(s),this.h()},h(){a(p,"class","text-xl font-bold text-slate-50 group-hover:text-sky-500"),a(E,"class","text-md text-slate-50 group-hover:text-sky-500"),a(l,"target","_blank"),a(l,"href",I=d[9].url),a(l,"class","flex-none flex flex-col items-center justify-center w-64 pt-3 pb-6 
group")},m(_,$){B(_,l,$),Me(c,l,null),e(l,n),e(l,p),e(p,u),e(l,g),e(l,E),e(E,T),e(l,M),j=!0},p:ft,i(_){j||(N(c.$$.fragment,_),j=!0)},o(_){Y(c.$$.fragment,_),j=!1},d(_){_&&s(l),Te(c)}}}function qt(d){let l,c,n,p,m=d[8]+1+"",u,g,E,R=d[6].text+"",T,M,I,j,_;return{c(){l=r("div"),c=r("div"),n=r("div"),p=r("div"),u=y(m),g=w(),E=r("div"),T=y(R),M=w(),I=r("img"),_=w(),this.h()},l($){l=i($,"DIV",{class:!0});var S=o(l);c=i(S,"DIV",{class:!0});var x=o(c);n=i(x,"DIV",{class:!0});var h=o(n);p=i(h,"DIV",{class:!0});var A=o(p);u=b(A,m),A.forEach(s),h.forEach(s),g=k(x),E=i(x,"DIV",{class:!0});var D=o(E);T=b(D,R),D.forEach(s),x.forEach(s),M=k(S),I=i(S,"IMG",{class:!0,src:!0,alt:!0}),_=k(S),S.forEach(s),this.h()},h(){a(p,"class","text-4xl font-bold text-slate-50 text-shadow svelte-uz1u3c"),a(n,"class","bg-slate-900 w-20 h-20 flex-none flex items-center justify-center rounded-br-md"),a(E,"class","text-md text-slate-900 mx-3 mt-3 leading-tight"),a(c,"class","flex flex-row items-start justify-start text-left mb-3"),a(I,"class","w-full h-36 my-3 object-contain"),Ut(I.src,j=d[6].image)||a(I,"src",j),a(I,"alt",""),a(l,"class","bg-slate-50 w-80 flex-none mb-12 lg:mb-0 shadow-md rounded-md")},m($,S){B($,l,S),e(l,c),e(c,n),e(n,p),e(p,u),e(c,g),e(c,E),e(E,T),e(l,M),e(l,I),e(l,_)},p:ft,d($){$&&s(l)}}}function ul(d){let l,c,n,p,m,u,g,E,R,T,M,I,j,_,$,S,x,h,A,D,P,Z,ee,C,L,te,Le,ge,ze,Fe,_e,qe,Re,z,xe,Je,Ne,le,je,F,ye,Ze,Xe,se,Be,q,be,Qe,We,G,X,ae,Ye,Q,re,et,W,ie,Ke,J,we,tt,lt,ke,ne,Ce,oe,O,st,ce,at,rt,de,it,nt,ue,ot,ct,fe,dt,ut,he,ve=d[3],V=[];for(let t=0;tY(V[t],1,1,()=>{V[t]=null});let Ee=d[4],K=[];for(let t=0;t{var R,T;document.title="Addressing Uncertainty",m=encodeURIComponent((T=(R=window==null?void 0:window.location)==null?void 0:R.href)!=null?T:""),c(0,u=`https://www.facebook.com/sharer.php?u=${m}`),c(1,g=`https://twitter.com/intent/tweet?url=${m}&text=Check%20out%20this%20eBook%3A%20Addressing%20Uncertainty%20in%20MultiSector%20Dynamics%20Research&hashtags=DOE,IM3,MultiSectorDynamics`),c(2,E=`https://www.linkedin.com/shareArticle?mini=true&url=${m}&title=Check%20out%20this%20eBook%3A%20Addressing%20Uncertainty%20in%20MultiSector%20Dynamics%20Research&summary=Open%20access%20eBook%20showcasing%20sensitivity%20analysis%20and%20diagnostic%20model%20evaluation%20techniques.`)}),[u,g,E,n,p]}class ml extends Bt{constructor(l){super();Kt(this,l,fl,ul,Ct,{})}}export{ml as default,vl as router}; diff --git a/dev/_app/start-1815c519.js b/dev/_app/start-1815c519.js new file mode 100644 index 0000000..72015b2 --- /dev/null +++ b/dev/_app/start-1815c519.js @@ -0,0 +1 @@ +var fe=Object.defineProperty,ue=Object.defineProperties;var he=Object.getOwnPropertyDescriptors;var J=Object.getOwnPropertySymbols;var H=Object.prototype.hasOwnProperty,Q=Object.prototype.propertyIsEnumerable;var Z=(o,e,t)=>e in o?fe(o,e,{enumerable:!0,configurable:!0,writable:!0,value:t}):o[e]=t,y=(o,e)=>{for(var t in e||(e={}))H.call(e,t)&&Z(o,t,e[t]);if(J)for(var t of J(e))Q.call(e,t)&&Z(o,t,e[t]);return o},W=(o,e)=>ue(o,he(e));var ee=(o,e)=>{var t={};for(var r in o)H.call(o,r)&&e.indexOf(r)<0&&(t[r]=o[r]);if(o!=null&&J)for(var r of J(o))e.indexOf(r)<0&&Q.call(o,r)&&(t[r]=o[r]);return t};import{S as de,i as _e,s as pe,e as ge,c as me,a as we,d as $,b as z,f as A,g as S,t as be,h as ve,j as ye,k as ke,l as w,m as $e,n as x,o as b,p as O,q as v,r as Ee,u as Re,v as G,w as L,x as P,y as U,z as C,A as V,B as j,C as I,D as K,E as te}from"./chunks/vendor-80631b75.js";function Se(o){let e,t,r;const l=[o[1]||{}];var i=o[0][0];function a(s){let n={};for(let 
c=0;c{j(f,1)}),O()}i?(e=new i(a()),L(e.$$.fragment),v(e.$$.fragment,1),U(e,t.parentNode,t)):e=null}else i&&e.$set(c)},i(s){r||(e&&v(e.$$.fragment,s),r=!0)},o(s){e&&b(e.$$.fragment,s),r=!1},d(s){s&&$(t),e&&j(e,s)}}}function Le(o){let e,t,r;const l=[o[1]||{}];var i=o[0][0];function a(s){let n={$$slots:{default:[Ne]},$$scope:{ctx:s}};for(let c=0;c{j(f,1)}),O()}i?(e=new i(a(s)),L(e.$$.fragment),v(e.$$.fragment,1),U(e,t.parentNode,t)):e=null}else i&&e.$set(c)},i(s){r||(e&&v(e.$$.fragment,s),r=!0)},o(s){e&&b(e.$$.fragment,s),r=!1},d(s){s&&$(t),e&&j(e,s)}}}function Ue(o){let e,t,r;const l=[o[2]||{}];var i=o[0][1];function a(s){let n={};for(let c=0;c{j(f,1)}),O()}i?(e=new i(a()),L(e.$$.fragment),v(e.$$.fragment,1),U(e,t.parentNode,t)):e=null}else i&&e.$set(c)},i(s){r||(e&&v(e.$$.fragment,s),r=!0)},o(s){e&&b(e.$$.fragment,s),r=!1},d(s){s&&$(t),e&&j(e,s)}}}function je(o){let e,t,r;const l=[o[2]||{}];var i=o[0][1];function a(s){let n={$$slots:{default:[Ae]},$$scope:{ctx:s}};for(let c=0;c{j(f,1)}),O()}i?(e=new i(a(s)),L(e.$$.fragment),v(e.$$.fragment,1),U(e,t.parentNode,t)):e=null}else i&&e.$set(c)},i(s){r||(e&&v(e.$$.fragment,s),r=!0)},o(s){e&&b(e.$$.fragment,s),r=!1},d(s){s&&$(t),e&&j(e,s)}}}function Ae(o){let e,t,r;const l=[o[3]||{}];var i=o[0][2];function a(s){let n={};for(let c=0;c{j(f,1)}),O()}i?(e=new i(a()),L(e.$$.fragment),v(e.$$.fragment,1),U(e,t.parentNode,t)):e=null}else i&&e.$set(c)},i(s){r||(e&&v(e.$$.fragment,s),r=!0)},o(s){e&&b(e.$$.fragment,s),r=!1},d(s){s&&$(t),e&&j(e,s)}}}function Ne(o){let e,t,r,l;const i=[je,Ue],a=[];function s(n,c){return n[0][2]?0:1}return e=s(o),t=a[e]=i[e](o),{c(){t.c(),r=w()},l(n){t.l(n),r=w()},m(n,c){a[e].m(n,c),S(n,r,c),l=!0},p(n,c){let f=e;e=s(n),e===f?a[e].p(n,c):(x(),b(a[f],1,1,()=>{a[f]=null}),O(),t=a[e],t?t.p(n,c):(t=a[e]=i[e](n),t.c()),v(t,1),t.m(r.parentNode,r))},i(n){l||(v(t),l=!0)},o(n){b(t),l=!1},d(n){a[e].d(n),n&&$(r)}}}function se(o){let e,t=o[5]&&re(o);return{c(){e=ge("div"),t&&t.c(),this.h()},l(r){e=me(r,"DIV",{id:!0,"aria-live":!0,"aria-atomic":!0,style:!0});var l=we(e);t&&t.l(l),l.forEach($),this.h()},h(){z(e,"id","svelte-announcer"),z(e,"aria-live","assertive"),z(e,"aria-atomic","true"),A(e,"position","absolute"),A(e,"left","0"),A(e,"top","0"),A(e,"clip","rect(0 0 0 0)"),A(e,"clip-path","inset(50%)"),A(e,"overflow","hidden"),A(e,"white-space","nowrap"),A(e,"width","1px"),A(e,"height","1px")},m(r,l){S(r,e,l),t&&t.m(e,null)},p(r,l){r[5]?t?t.p(r,l):(t=re(r),t.c(),t.m(e,null)):t&&(t.d(1),t=null)},d(r){r&&$(e),t&&t.d()}}}function re(o){let e;return{c(){e=be(o[6])},l(t){e=ve(t,o[6])},m(t,r){S(t,e,r)},p(t,r){r&64&&ye(e,t[6])},d(t){t&&$(e)}}}function Te(o){let e,t,r,l,i;const a=[Le,Se],s=[];function n(f,h){return f[0][1]?0:1}e=n(o),t=s[e]=a[e](o);let c=o[4]&&se(o);return{c(){t.c(),r=ke(),c&&c.c(),l=w()},l(f){t.l(f),r=$e(f),c&&c.l(f),l=w()},m(f,h){s[e].m(f,h),S(f,r,h),c&&c.m(f,h),S(f,l,h),i=!0},p(f,[h]){let u=e;e=n(f),e===u?s[e].p(f,h):(x(),b(s[u],1,1,()=>{s[u]=null}),O(),t=s[e],t?t.p(f,h):(t=s[e]=a[e](f),t.c()),v(t,1),t.m(r.parentNode,r)),f[4]?c?c.p(f,h):(c=se(f),c.c(),c.m(l.parentNode,l)):c&&(c.d(1),c=null)},i(f){i||(v(t),i=!0)},o(f){b(t),i=!1},d(f){s[e].d(f),f&&$(r),c&&c.d(f),f&&$(l)}}}function xe(o,e,t){let{stores:r}=e,{page:l}=e,{components:i}=e,{props_0:a=null}=e,{props_1:s=null}=e,{props_2:n=null}=e;Ee("__svelte__",r),Re(r.page.notify);let c=!1,f=!1,h=null;return G(()=>{const u=r.page.subscribe(()=>{c&&(t(5,f=!0),t(6,h=document.title||"untitled page"))});return t(4,c=!0),u}),o.$$set=u=>{"stores"in u&&t(7,r=u.stores),"page"in 
u&&t(8,l=u.page),"components"in u&&t(0,i=u.components),"props_0"in u&&t(1,a=u.props_0),"props_1"in u&&t(2,s=u.props_1),"props_2"in u&&t(3,n=u.props_2)},o.$$.update=()=>{o.$$.dirty&384&&r.page.set(l)},[i,a,s,n,c,f,h,r,l]}class Oe extends de{constructor(e){super();_e(this,e,xe,Te,pe,{stores:7,page:8,components:0,props_0:1,props_1:2,props_2:3})}}const Pe="modulepreload",ie={},Ce="/dev/_app/",M=function(e,t){return!t||t.length===0?e():Promise.all(t.map(r=>{if(r=`${Ce}${r}`,r in ie)return;ie[r]=!0;const l=r.endsWith(".css"),i=l?'[rel="stylesheet"]':"";if(document.querySelector(`link[href="${r}"]${i}`))return;const a=document.createElement("link");if(a.rel=l?"stylesheet":Pe,l||(a.as="script",a.crossOrigin=""),a.href=r,document.head.appendChild(a),l)return new Promise((s,n)=>{a.addEventListener("load",s),a.addEventListener("error",n)})})).then(()=>e())},D=[()=>M(()=>import("./pages/__layout.svelte-3f8aeddf.js"),["pages/__layout.svelte-3f8aeddf.js","assets/pages/__layout.svelte-c9f861a9.css","chunks/vendor-80631b75.js"]),()=>M(()=>import("./error.svelte-bcbf8e93.js"),["error.svelte-bcbf8e93.js","chunks/vendor-80631b75.js"]),()=>M(()=>import("./pages/index.svelte-de143cc1.js"),["pages/index.svelte-de143cc1.js","assets/pages/index.svelte-7f4057b4.css","chunks/vendor-80631b75.js"])],Ve=[[/^\/$/,[D[0],D[2]],[D[1]]]],Ie=[D[0](),D[1]()];function De(o){let e=o.baseURI;if(!e){const t=o.getElementsByTagName("base");e=t.length?t[0].href:o.URL}return e}let Y="";function qe(o){Y=o.base,o.assets}function F(){return{x:pageXOffset,y:pageYOffset}}function ne(o){return o.composedPath().find(t=>t instanceof Node&&t.nodeName.toUpperCase()==="A")}function ae(o){return o instanceof SVGAElement?new URL(o.href.baseVal,document.baseURI):new URL(o.href)}class Be{constructor({base:e,routes:t,trailing_slash:r,renderer:l}){var i,a;this.base=e,this.routes=t,this.trailing_slash=r,this.navigating=0,this.renderer=l,l.router=this,this.enabled=!0,document.body.setAttribute("tabindex","-1"),this.current_history_index=(a=(i=history.state)==null?void 0:i["sveltekit:index"])!=null?a:0,this.current_history_index===0&&history.replaceState(W(y({},history.state),{"sveltekit:index":0}),"",location.href),this.callbacks={before_navigate:[],after_navigate:[]}}init_listeners(){"scrollRestoration"in history&&(history.scrollRestoration="manual"),addEventListener("beforeunload",i=>{let a=!1;const s={from:this.renderer.current.url,to:null,cancel:()=>a=!0};this.callbacks.before_navigate.forEach(n=>n(s)),a?(i.preventDefault(),i.returnValue=""):history.scrollRestoration="auto"}),addEventListener("load",()=>{history.scrollRestoration="manual"});let e;addEventListener("scroll",()=>{clearTimeout(e),e=setTimeout(()=>{const i=W(y({},history.state||{}),{"sveltekit:scroll":F()});history.replaceState(i,document.title,window.location.href)},200)});const t=i=>{const a=ne(i);a&&a.href&&a.hasAttribute("sveltekit:prefetch")&&this.prefetch(ae(a))};let r;const l=i=>{clearTimeout(r),r=setTimeout(()=>{var a;(a=i.target)==null||a.dispatchEvent(new CustomEvent("sveltekit:trigger_prefetch",{bubbles:!0}))},20)};addEventListener("touchstart",t),addEventListener("mousemove",l),addEventListener("sveltekit:trigger_prefetch",t),addEventListener("click",i=>{if(!this.enabled||i.button||i.which!==1||i.metaKey||i.ctrlKey||i.shiftKey||i.altKey||i.defaultPrevented)return;const a=ne(i);if(!a||!a.href)return;const s=ae(a);if(s.toString()===location.href){location.hash||i.preventDefault();return}const 
c=(a.getAttribute("rel")||"").split(/\s+/);if(!(a.hasAttribute("download")||c&&c.includes("external"))&&!(a instanceof SVGAElement?a.target.baseVal:a.target)){if(s.href.split("#")[0]===location.href.split("#")[0]){setTimeout(()=>history.pushState({},"",s.href));const f=this.parse(s);return f?this.renderer.update(f,[],!1):void 0}this._navigate({url:s,scroll:a.hasAttribute("sveltekit:noscroll")?F():null,keepfocus:!1,chain:[],details:{state:{},replaceState:!1},accepted:()=>i.preventDefault(),blocked:()=>i.preventDefault()})}}),addEventListener("popstate",i=>{if(i.state&&this.enabled){if(i.state["sveltekit:index"]===this.current_history_index)return;this._navigate({url:new URL(location.href),scroll:i.state["sveltekit:scroll"],keepfocus:!1,chain:[],details:null,accepted:()=>{this.current_history_index=i.state["sveltekit:index"]},blocked:()=>{const a=this.current_history_index-i.state["sveltekit:index"];history.go(a)}})}})}owns(e){return e.origin===location.origin&&e.pathname.startsWith(this.base)}parse(e){if(this.owns(e)){const t=decodeURI(e.pathname.slice(this.base.length)||"/");return{id:e.pathname+e.search,routes:this.routes.filter(([r])=>r.test(t)),url:e,path:t}}}async goto(e,{noscroll:t=!1,replaceState:r=!1,keepfocus:l=!1,state:i={}}={},a){const s=new URL(e,De(document));return this.enabled?this._navigate({url:s,scroll:t?F():null,keepfocus:l,chain:a,details:{state:i,replaceState:r},accepted:()=>{},blocked:()=>{}}):(location.href=s.href,new Promise(()=>{}))}enable(){this.enabled=!0}disable(){this.enabled=!1}async prefetch(e){const t=this.parse(e);if(!t)throw new Error("Attempted to prefetch a URL that does not belong to this app");return this.renderer.load(t)}after_navigate(e){G(()=>(this.callbacks.after_navigate.push(e),()=>{const t=this.callbacks.after_navigate.indexOf(e);this.callbacks.after_navigate.splice(t,1)}))}before_navigate(e){G(()=>(this.callbacks.before_navigate.push(e),()=>{const t=this.callbacks.before_navigate.indexOf(e);this.callbacks.before_navigate.splice(t,1)}))}async _navigate({url:e,scroll:t,keepfocus:r,chain:l,details:i,accepted:a,blocked:s}){const n=this.renderer.current.url;let c=!1;const f={from:n,to:e,cancel:()=>c=!0};if(this.callbacks.before_navigate.forEach(d=>d(f)),c){s();return}const h=this.parse(e);if(!h)return location.href=e.href,new Promise(()=>{});a(),this.navigating||dispatchEvent(new CustomEvent("sveltekit:navigation-start")),this.navigating++;let{pathname:u}=e;if(this.trailing_slash==="never"?u!=="/"&&u.endsWith("/")&&(u=u.slice(0,-1)):this.trailing_slash==="always"&&!e.pathname.split("/").pop().includes(".")&&!u.endsWith("/")&&(u+="/"),h.url=new URL(e.origin+u+e.search+e.hash),i){const d=i.replaceState?0:1;i.state["sveltekit:index"]=this.current_history_index+=d,history[i.replaceState?"replaceState":"pushState"](i.state,"",h.url)}if(await this.renderer.handle_navigation(h,l,!1,{scroll:t,keepfocus:r}),this.navigating--,!this.navigating){dispatchEvent(new CustomEvent("sveltekit:navigation-end"));const d={from:n,to:e};this.callbacks.after_navigate.forEach(_=>_(d))}}}function oe(o){return o instanceof Error||o&&o.name&&o.message?o:new Error(JSON.stringify(o))}function Je(o){let e=5381,t=o.length;if(typeof o=="string")for(;t;)e=e*33^o.charCodeAt(--t);else for(;t;)e=e*33^o[--t];return(e>>>0).toString(36)}function le(o){const e=o.status&&o.status>=400&&o.status<=599&&!o.redirect;if(o.error||e){const t=o.status;if(!o.error&&e)return{status:t||500,error:new Error};const r=typeof o.error=="string"?new Error(o.error):o.error;return r instanceof 
Error?!t||t<400||t>599?(console.warn('"error" returned from load() without a valid status code \u2014 defaulting to 500'),{status:500,error:r}):{status:t,error:r}:{status:500,error:new Error(`"error" property returned from load() must be a string or instance of Error, received type "${typeof r}"`)}}if(o.redirect){if(!o.status||Math.floor(o.status/100)!==3)return{status:500,error:new Error('"redirect" property returned from load() must be accompanied by a 3xx status code')};if(typeof o.redirect!="string")return{status:500,error:new Error('"redirect" property returned from load() must be a string')}}if(o.context)throw new Error('You are returning "context" from a load function. "context" was renamed to "stuff", please adjust your code accordingly.');return o}function ce(o){const e=K(o);let t=!0;function r(){t=!0,e.update(a=>a)}function l(a){t=!1,e.set(a)}function i(a){let s;return e.subscribe(n=>{(s===void 0||t&&n!==s)&&a(s=n)})}return{notify:r,set:l,subscribe:i}}function Ke(){const{set:o,subscribe:e}=K(!1),t="1720790647055";let r;async function l(){clearTimeout(r);const a=await fetch(`${Y}/_app/version.json`,{headers:{pragma:"no-cache","cache-control":"no-cache"}});if(a.ok){const{version:s}=await a.json(),n=s!==t;return n&&(o(!0),clearTimeout(r)),n}else throw new Error(`Version check failed: ${a.status}`)}return{subscribe:e,check:l}}function We(o,e){const t=typeof o=="string"?o:o.url;let r=`script[data-type="svelte-data"][data-url=${JSON.stringify(t)}]`;e&&typeof e.body=="string"&&(r+=`[data-body="${Je(e.body)}"]`);const l=document.querySelector(r);if(l&&l.textContent){const i=JSON.parse(l.textContent),{body:a}=i,s=ee(i,["body"]);return Promise.resolve(new Response(a,s))}return fetch(o,e)}class ze{constructor({Root:e,fallback:t,target:r,session:l}){this.Root=e,this.fallback=t,this.router,this.target=r,this.started=!1,this.session_id=1,this.invalid=new Set,this.invalidating=null,this.autoscroll=!0,this.updating=!1,this.current={url:null,session_id:0,branch:[]},this.cache=new Map,this.loading={id:null,promise:null},this.stores={url:ce({}),page:ce({}),navigating:K(null),session:K(l),updated:Ke()},this.$session=null,this.root=null;let i=!1;this.stores.session.subscribe(async a=>{if(this.$session=a,!i||!this.router)return;this.session_id+=1;const s=this.router.parse(new URL(location.href));s&&this.update(s,[],!0)}),i=!0}disable_scroll_handling(){(this.updating||!this.started)&&(this.autoscroll=!1)}async start({status:e,error:t,nodes:r,url:l,params:i}){const a=[];let s={},n,c;l.hash=window.location.hash;try{for(let f=0;f10||t.includes(e.url.pathname))a=await this._load_error({status:500,error:new Error("Redirect loop"),url:e.url});else{this.router?this.router.goto(new URL(a.redirect,e.url).href,{replaceState:!0},[...t,e.url.pathname]):location.href=new URL(a.redirect,location.href).href;return}else if(((c=(n=a.props)==null?void 0:n.page)==null?void 0:c.status)>=400&&await this.stores.updated.check()){location.href=e.url.href;return}if(this.updating=!0,this.started?(this.current=a.state,this.root.$set(a.props),this.stores.navigating.set(null)):this._init(a),l){const{scroll:h,keepfocus:u}=l;if(u||((f=getSelection())==null||f.removeAllRanges(),document.body.focus()),await te(),this.autoscroll){const d=e.url.hash&&document.getElementById(e.url.hash.slice(1));h?scrollTo(h.x,h.y):d?d.scrollIntoView():scrollTo(0,0)}}else await te();if(this.loading.promise=null,this.loading.id=null,this.autoscroll=!0,this.updating=!1,!this.router)return;const 
s=a.state.branch[a.state.branch.length-1];s&&s.module.router===!1?this.router.disable():this.router.enable()}load(e){return this.loading.promise=this._get_navigation_result(e,!1),this.loading.id=e.id,this.loading.promise}invalidate(e){return this.invalid.add(e),this.invalidating||(this.invalidating=Promise.resolve().then(async()=>{const t=this.router&&this.router.parse(new URL(location.href));t&&await this.update(t,[],!0),this.invalidating=null})),this.invalidating}_init(e){this.current=e.state;const t=document.querySelector("style[data-svelte]");if(t&&t.remove(),this.root=new this.Root({target:this.target,props:y({stores:this.stores},e.props),hydrate:!0}),this.started=!0,this.router){const r={from:null,to:new URL(location.href)};this.router.callbacks.after_navigate.forEach(l=>l(r))}}async _get_navigation_result(e,t){if(this.loading.id===e.id&&this.loading.promise)return this.loading.promise;for(let r=0;rn()),i+=1;else break}const a=await this._load({route:l,info:e},t);if(a)return a}return await this._load_error({status:404,error:new Error(`Not found: ${e.url.pathname}`),url:e.url})}async _get_navigation_result_from_branch({url:e,params:t,stuff:r,branch:l,status:i,error:a}){const s=l.filter(Boolean),n=s.find(u=>u.loaded&&u.loaded.redirect),c={redirect:n&&n.loaded?n.loaded.redirect:void 0,state:{url:e,params:t,branch:l,session_id:this.session_id},props:{components:s.map(u=>u.module.default)}};for(let u=0;u{Object.defineProperty(c.props.page,d,{get:()=>{throw new Error(`$page.${d} has been replaced by $page.url.${_}`)}})};u("origin","origin"),u("path","pathname"),u("query","searchParams")}const f=s[s.length-1],h=f.loaded&&f.loaded.maxage;if(h){const u=e.pathname+e.search;let d=!1;const _=()=>{this.cache.get(u)===c&&this.cache.delete(u),E(),clearTimeout(N)},N=setTimeout(_,h*1e3),E=this.stores.session.subscribe(()=>{d&&_()});d=!0,this.cache.set(u,c)}return c}async _load_node({status:e,error:t,module:r,url:l,params:i,stuff:a,props:s}){const n={module:r,uses:{params:new Set,url:!1,session:!1,stuff:!1,dependencies:new Set},loaded:null,stuff:a};s&&n.uses.dependencies.add(l.href);const c={};for(const h in i)Object.defineProperty(c,h,{get(){return n.uses.params.add(h),i[h]},enumerable:!0});const f=this.$session;if(r.load){const{started:h}=this,u={params:c,props:s||{},get url(){return n.uses.url=!0,l},get session(){return n.uses.session=!0,f},get stuff(){return n.uses.stuff=!0,y({},a)},fetch(_,N){const E=typeof _=="string"?_:_.url,{href:R}=new URL(E,l);return n.uses.dependencies.add(R),h?fetch(_,N):We(_,N)}};t&&(u.status=e,u.error=t);const d=await r.load.call(null,u);if(!d)throw new Error("load function must return a value");n.loaded=le(d),n.loaded.stuff&&(n.stuff=n.loaded.stuff)}else s&&(n.loaded=le({props:s}));return n}async _load({route:e,info:{url:t,path:r}},l){const i=t.pathname+t.search;if(!l){const p=this.cache.get(i);if(p)return p}const[a,s,n,c,f]=e,h=c?c(a.exec(r)):{},u=this.current.url&&{url:i!==this.current.url.pathname+this.current.url.search,params:Object.keys(h).filter(p=>this.current.params[p]!==h[p]),session:this.session_id!==this.current.session_id};let d=[],_={},N=!1,E=200,R;s.forEach(p=>p());e:for(let p=0;pk.uses.params.has(T))||u.session&&k.uses.session||Array.from(k.uses.dependencies).some(T=>this.invalid.has(T))||N&&k.uses.stuff){let T={};if(f&&p===s.length-1){const B=await fetch(`${t.pathname}/__data.json`,{headers:{"x-sveltekit-noredirect":"true"}});if(B.ok){const X=B.headers.get("x-sveltekit-location");if(X)return{redirect:X,props:{},state:this.current};T=await B.json()}else 
E=B.status,R=new Error("Failed to load data")}if(R||(g=await this._load_node({module:m,url:t,params:h,props:T,stuff:_})),g&&g.loaded){if(g.loaded.fallthrough)return;if(g.loaded.error&&(E=g.loaded.status,R=g.loaded.error),g.loaded.redirect)return{redirect:g.loaded.redirect,props:{},state:this.current};g.loaded.stuff&&(N=!0)}}else g=k}catch(m){E=500,R=oe(m)}if(R){for(;p--;)if(n[p]){let m,k,q=p;for(;!(k=d[q]);)q-=1;try{if(m=await this._load_node({status:E,error:R,module:await n[p](),url:t,params:h,stuff:k.stuff}),m&&m.loaded&&m.loaded.error)continue;m&&m.loaded&&m.loaded.stuff&&(_=y(y({},_),m.loaded.stuff)),d=d.slice(0,q+1).concat(m);break e}catch{continue}}return await this._load_error({status:E,error:R,url:t})}else g&&g.loaded&&g.loaded.stuff&&(_=y(y({},_),g.loaded.stuff)),d.push(g)}return await this._get_navigation_result_from_branch({url:t,params:h,stuff:_,branch:d,status:E,error:R})}async _load_error({status:e,error:t,url:r}){var c,f;const l={},i=await this._load_node({module:await this.fallback[0],url:r,params:l,stuff:{}}),a=await this._load_node({status:e,error:t,module:await this.fallback[1],url:r,params:l,stuff:i&&i.loaded&&i.loaded.stuff||{}}),s=[i,a],n=y(y({},(c=i==null?void 0:i.loaded)==null?void 0:c.stuff),(f=a==null?void 0:a.loaded)==null?void 0:f.stuff);return await this._get_navigation_result_from_branch({url:r,params:l,stuff:n,branch:s,status:e,error:t})}}async function Ye({paths:o,target:e,session:t,route:r,spa:l,trailing_slash:i,hydrate:a}){const s=new ze({Root:Oe,fallback:Ie,target:e,session:t}),n=r?new Be({base:o.base,routes:Ve,trailing_slash:i,renderer:s}):null;qe(o),a&&await s.start(a),n&&(l&&n.goto(location.href,{replaceState:!0},[]),n.init_listeners()),dispatchEvent(new CustomEvent("sveltekit:start"))}export{Ye as start}; diff --git a/dev/_app/version.json b/dev/_app/version.json new file mode 100644 index 0000000..cbf648a --- /dev/null +++ b/dev/_app/version.json @@ -0,0 +1 @@ +{"version":"1720790647055"} \ No newline at end of file diff --git a/dev/docs/addressinguncertaintyinmultisectordynamicsresearch.pdf b/dev/docs/addressinguncertaintyinmultisectordynamicsresearch.pdf new file mode 100644 index 0000000..8cfdf3d Binary files /dev/null and b/dev/docs/addressinguncertaintyinmultisectordynamicsresearch.pdf differ diff --git a/dev/docs/html/.buildinfo b/dev/docs/html/.buildinfo new file mode 100644 index 0000000..3780467 --- /dev/null +++ b/dev/docs/html/.buildinfo @@ -0,0 +1,4 @@ +# Sphinx build info version 1 +# This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done. +config: 46bbe39cebfe7ce3eff669bf2e2f482d +tags: 645f666f9bcd5a90fca523b33c5a78b7 diff --git a/dev/docs/html/.nojekyll b/dev/docs/html/.nojekyll new file mode 100644 index 0000000..e69de29 diff --git a/dev/docs/html/1_introduction.html b/dev/docs/html/1_introduction.html new file mode 100644 index 0000000..3c12aef --- /dev/null +++ b/dev/docs/html/1_introduction.html @@ -0,0 +1,493 @@ + + + + + + + + + + + 1. Introduction — Addressing Uncertainty in MultiSector Dynamics Research documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

Introduction

+ +
+
+ +
+
+
+ + + + +
+ +
+

1. Introduction#

+

This guidance text has been developed in support of the Integrated Multisector Multiscale Modeling (IM3) Science Focus Area’s objective to formally integrate uncertainty into its research tasks. IM3 is focused on innovative modeling to explore how human and natural system landscapes in the United States co-evolve in response to short-term shocks and long-term influences. The project’s challenging scope is to advance our ability to study the interactions between energy, water, land, and urban systems, at scales ranging from local (~1km) to the contiguous United States, while consistently addressing influences such as population change, technology change, heat waves, and drought. Uncertainty and careful model-driven scientific insights are central to the project’s science objectives shown below.

+

IM3 Key MSD Science Objectives:

+

Develop flexible, open-source, and integrated modeling capabilities that capture the structure, dynamic behavior, and emergent properties of the multiscale interactions within and between human and natural systems.

+

Use these capabilities to study the evolution, vulnerability, and resilience of interacting human and natural systems and landscapes from local to continental scales, including their responses to the compounding effects of long-term influences and short-term shocks.

+

Understand the implications of uncertainty in data, observations, models, and model coupling approaches for projections of human-natural system dynamics.

+

Addressing the objectives above poses a strong transdisciplinary challenge that depends on a diversity of models and, more specifically, a consistent framing for making model-based science inferences. The term transdisciplinary science as used here formally implies a deep integration of disciplines to aid our hypothesis-driven understanding of coupled human-natural systems, bridging differences in theory, hypothesis generation, modeling, and modes of inference [2]. The IM3 MSD research foci and questions require a deep integration across disciplines, where new modes of analysis can emerge that rapidly synthesize and exploit advances for generating decision-relevant insights that, at minimum, acknowledge uncertainty and, more ideally, promote a rigorous quantitative mapping of its effects on the generality of claimed scientific insights. More broadly, diverse scientific disciplines engaged in the science of coupled human-natural systems, ranging from natural sciences to engineering and economics, employ a diversity of numerical computer models to study and understand their underlying systems of focus. The utility of these computer models hinges on their ability to represent the underlying real systems with sufficient fidelity and enable the inference of novel insights. This is particularly challenging in the case of coupled human-natural systems, where a multitude of interdependent human and natural processes could potentially be represented. These processes usually translate into modeled representations that are highly complex, non-linear, and exhibit strong interactions and threshold behaviors [3, 4, 5]. Model complexity and detail have also been increasing as a result of our improving understanding of these processes, the availability of data, and the rapid growth in computing power [6]. As model complexity grows, modelers need to specify substantially more information than before: additional model inputs and relationships as more processes are represented, higher-resolution data as more observations are collected, and new coupling relationships and interactions as diverse models are used in combination to answer multisector questions (e.g., the land-water-energy nexus). Typically, not all of this information is well known, nor is the impact of these many uncertainties on model outputs well understood. It is especially difficult to distinguish the effects of individual as well as interacting sources of uncertainty when modeling coupled systems with multisector and multiscale dynamics [7].

+

Given the challenge and opportunity posed by the disciplinary diversity of IM3, we used an informal team-wide survey to understand how the various disciplines typically address uncertainty, emphasizing key literature examples and domain-specific reviews. The feedback received provided perspectives across diverse areas within the Earth sciences, various engineering fields, and economics. Although our synthesis of this survey information highlighted some commonality across areas (e.g., the frequent use of scenario-based modeling), we identified key differences in vocabulary, the frequency with which formal uncertainty analysis appears in the disciplinary literature, and technical approaches. The IM3 team’s responses captured a very broad conceptual continuum of methodological traditions, ranging from deterministic (no uncertainty) modeling to the theoretical case of fully modeling all sources of uncertainty. Overall, error-driven analyses that focus on replicating prior observed conditions were reported to be the most prevalent types of studies for all disciplines. It was generally less common for studies to strongly engage with analyzing uncertainty via more formal ensemble analyses and design of experiments, though some areas did show significantly higher levels of activity. Another notable finding from our survey was the apparent lack of focus on understanding how model coupling relationships shape uncertainty. Although these observations are limited to the scope of feedback obtained in the team-wide IM3 survey responses and the bodies of literature reported by respondents, we believe they reflect challenges that are common across the MSD community.

+

In the IM3 uncertainty-related research that has occurred since this survey, we have observed that differences in terminology and interpretation of terminology across modeling teams can be confounding. One of the goals of this eBook is to provide a common language for uncertainty analysis within IM3 and, hopefully, for the broader MSD community. While individual scientific disciplines would be expected to retain their own terminology, by providing explicit definitions of terms we can facilitate the translation of concepts across transdisciplinary science teams. To begin, we use the term Uncertainty Analysis (UA) as an umbrella phrase covering all methods in this eBook. Next, we distinguish the key terms of uncertainty quantification (UQ) and uncertainty characterization (UC). UQ refers to the formal focus on the full specification of likelihoods as well as the distributional forms necessary to infer the joint probabilistic response across all modeled factors of interest [8]. UC refers to exploratory modeling of alternative hypotheses to understand the co-evolutionary dynamics of influences and stressors, as well as path-dependent changes in the form and function of modeled systems [9, 10]. As discussed in later sections, the choice of UC or UQ depends on the specific goals of studies, the availability of data, the types of uncertainties (e.g., well-characterized or deep), and the complexity of the underlying models, as well as computational limits. Definitions of key uncertainty analysis terms used in this eBook appear below, and our Glossary (glossary) contains a complete list of terms.

+
  • Exploratory modeling: Use of large ensembles of uncertain conditions to discover decision-relevant combinations of uncertain factors
  • Factor: Any model component that can affect model outputs: inputs, resolution levels, coupling relationships, model relationships and parameters. In models with acceptable model fidelity these factors may represent elements of the real-world system under study.
  • Sensitivity analysis: Model evaluation to understand the factors and processes that most (or least) control a model’s outputs
    • Local sensitivity analysis: Varying uncertain factors around specific reference values
    • Global sensitivity analysis: Varying uncertain factors throughout their entire feasible value space
  • Uncertainty characterization: Model evaluation under alternative factor hypotheses to explore their implications for model output uncertainty
  • Uncertainty quantification: Representation of model output uncertainty using probability distributions
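To make the distinction between local and global sensitivity analysis above concrete, the following minimal Python sketch (an illustrative example written for this text, not code from any IM3 model) contrasts one-at-a-time perturbations around a reference point with a space-filling sample across the full assumed feasible ranges of two hypothetical factors; the toy model, reference values, and bounds are all invented for illustration.

import numpy as np

# Hypothetical two-factor model used purely for illustration.
def toy_model(x1, x2):
    return x1 ** 2 + x1 * x2

rng = np.random.default_rng(seed=42)
reference = np.array([0.5, 0.5])                 # nominal values of the two factors (assumed)
bounds = np.array([[0.0, 1.0], [0.0, 1.0]])      # assumed feasible range of each factor

# Local sensitivity analysis: vary one factor at a time around the reference values.
delta = 0.01
base_output = toy_model(*reference)
for i, name in enumerate(["x1", "x2"]):
    perturbed = reference.copy()
    perturbed[i] += delta
    derivative = (toy_model(*perturbed) - base_output) / delta
    print(f"Local derivative with respect to {name}: {derivative:.2f}")

# Global sensitivity analysis: sample both factors across their entire feasible space
# and use a simple variance-based proxy (squared correlation) to rank their influence.
samples = rng.uniform(bounds[:, 0], bounds[:, 1], size=(1000, 2))
outputs = toy_model(samples[:, 0], samples[:, 1])
for i, name in enumerate(["x1", "x2"]):
    r = np.corrcoef(samples[:, i], outputs)[0, 1]
    print(f"Approximate global variance share for {name}: {r ** 2:.2f}")

The local derivatives depend entirely on the chosen reference point, whereas the global sample reflects behavior across the whole assumed factor space; more rigorous variance-based decompositions are among the sensitivity analysis methods summarized in Chapter 3.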

At present, there is no singular guide for confronting the computational and conceptual challenges of the multi-model, transdisciplinary workflows that characterize ambitious projects such as IM3 [11]. The primary aim of this text is to begin to address this gap and provide guidance for facing these challenges. Chapter 2 provides an overview of diagnostic modeling and the different perspectives for how we should evaluate our models, Chapter 3 summarizes basic methods and concepts for sensitivity analysis, and Chapter 4 delves into more technical applications of sensitivity analysis to support diagnostic model evaluation and exploratory modeling. Finally, Chapter 5 provides some concluding remarks across the UC and UQ topics covered in this text. The appendices of this text include a glossary of the key concepts, an overview of UQ methods, and coding-based illustrative examples of key UC concepts discussed in earlier chapters.

+
+ + +
+ + + + + + + + +
+ + + + +
+
+ + +
+ + +
+
+
+ + + + + +
+
+ + \ No newline at end of file diff --git a/dev/docs/html/2.1_overview_of_model_diagnostics.html b/dev/docs/html/2.1_overview_of_model_diagnostics.html new file mode 100644 index 0000000..9486375 --- /dev/null +++ b/dev/docs/html/2.1_overview_of_model_diagnostics.html @@ -0,0 +1,461 @@ + + + + + + + + + + + Overview of model diagnostics — Addressing Uncertainty in MultiSector Dynamics Research documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

Overview of model diagnostics

+ +
+
+ +
+
+
+ + + + +
+ +
+

Overview of model diagnostics#

+

Model diagnostics provide a rich basis for hypothesis testing, model innovation, and improved inferences when classifying what controls highly consequential results (e.g., vulnerability or resilience in coupled human-natural systems). Fig. 2.1, adapted from [6], presents idealized illustrations of the relationship between UC and global sensitivity analysis for two coupled simulation models. The figure illustrates how, in UC, uncertainties in various modeling decisions (e.g., data inputs, parameters, model structures, coupling relationships) can be sampled and simulated to yield the empirical model output distribution(s) of interest. Monte Carlo frameworks allow us to sample and propagate (or integrate) the ensemble response of the model(s) of focus. The first step of any UC analysis is the specification of the initial input distributions as illustrated in Fig. 2.1. The second step is to perform the Monte Carlo simulations. The question can then be raised: which of the modeling assumptions in our Monte Carlo experiment are most responsible for the resulting output uncertainty? We can answer this question using global sensitivity analysis as illustrated in Fig. 2.1. Global sensitivity analysis can be defined as a formal Monte Carlo sampling and analysis of modeling choices (structures, parameters, inputs) to quantify their influence on direct model outputs (or output-informed metrics). UC experiments by themselves do not explain why a particular uncertain outcome is produced, but produce distributions of model outcomes, as portrayed by the yellow curve. The pie chart shown in Fig. 2.1 is a conceptual representation of the results of using a global sensitivity analysis to identify the factors that most dominantly influence results, either individually or interactively [14].

+
Figure 2.1

Idealized uncertainty characterization and global sensitivity analysis for two coupled simulation models. Uncertainty coming from various sources (e.g., inputs, model structures, coupling relationships) is propagated through the coupled model(s) to generate empirical distributions of outputs of interest (uncertainty characterization). This model output uncertainty can be decomposed into its origins by means of sensitivity analysis. Figure adapted from Saltelli et al. [6].#

+
+
+

UC and global sensitivity analysis are not independent modeling analyses. As illustrated here, any global sensitivity analysis requires an initial UC hypothesis in the form of statistical assumptions and representations for the modeling choices of focus (structural, parametric, and data inputs). Information from these two model diagnostic tools can then be used to inform data needs for future model runs, experiments to reduce the uncertainty present, or the simplification or enhancement of the model where necessary. Together UC and global sensitivity analysis provide a foundation for diagnostic exploratory modeling that has a consistent focus on the assumptions, structural model forms, alternative parameterizations, and input data sets that are used to characterize the behavioral space of one or more models.
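As a concrete sketch of this workflow, the hypothetical Python example below specifies assumed input ranges for three illustrative factors, propagates a Monte Carlo ensemble through a stand-in "coupled model" to characterize the output distribution, and then decomposes the resulting output variance with Sobol global sensitivity analysis. It assumes the open-source SALib package is installed (Saltelli sampling and Sobol analysis, SALib 1.4.x-style API); the factor names, bounds, and toy model are invented for illustration and are not drawn from any IM3 model.

import numpy as np
from SALib.sample import saltelli
from SALib.analyze import sobol

# Step 1 (UC hypothesis): specify the uncertain factors and their assumed ranges.
problem = {
    "num_vars": 3,
    "names": ["demand_growth", "streamflow_scale", "coupling_lag"],
    "bounds": [[0.0, 0.05], [0.5, 1.5], [0.0, 2.0]],
}

# Stand-in for a coupled simulation model: any function mapping sampled factors to an output metric.
def coupled_model(x):
    demand_growth, streamflow_scale, coupling_lag = x
    return (1.0 + demand_growth) ** 10 / streamflow_scale + 0.1 * coupling_lag * demand_growth

# Step 2 (Monte Carlo propagation): run the ensemble to obtain the empirical output distribution.
param_values = saltelli.sample(problem, 1024)
Y = np.array([coupled_model(x) for x in param_values])
print(f"Output mean: {Y.mean():.3f}; 5th to 95th percentile range: "
      f"[{np.percentile(Y, 5):.3f}, {np.percentile(Y, 95):.3f}]")

# Step 3 (global sensitivity analysis): attribute the output variance to the individual factors.
Si = sobol.analyze(problem, Y)
for name, s1, st in zip(problem["names"], Si["S1"], Si["ST"]):
    print(f"{name}: first-order index = {s1:.2f}, total-order index = {st:.2f}")

In a real coupled-model application the same three-step structure applies, but each ensemble member is a full simulation and the assumed bounds and distributions are themselves part of the UC hypothesis being scrutinized.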

+
+ + +
+ + + + + + +
+ +
+
+
+ +
+ + + + +
+
+ + +
+ + +
+
+
+ + + + + +
+
+ + \ No newline at end of file diff --git a/dev/docs/html/2.2_perspectives_on_diagnostic_model_evaluation.html b/dev/docs/html/2.2_perspectives_on_diagnostic_model_evaluation.html new file mode 100644 index 0000000..e7244c6 --- /dev/null +++ b/dev/docs/html/2.2_perspectives_on_diagnostic_model_evaluation.html @@ -0,0 +1,459 @@ + + + + + + + + + + + Perspectives on diagnostic model evaluation — Addressing Uncertainty in MultiSector Dynamics Research documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+

Perspectives on diagnostic model evaluation

+ +
+
+ +
+
+
+ + + + +
+ +
+

Perspectives on diagnostic model evaluation#

+

When we judge or diagnose models, the terms “verification” and “validation” are commonly used. However, their appropriateness in the context of numerical models representing complex coupled human-natural systems is questionable [15, 16]. The core issue is that these systems are often not fully known or perfectly implemented when modeled. Rather, they are defined within specific system framings and boundary conditions in an evolving learning process with the goal of making continual progress towards higher levels of fidelity in capturing behaviors or properties of interest. Evaluating the fidelity of a model’s performance can be highly challenging. For example, the observations used to evaluate the fidelity of parameterized processes are often measured at a finer resolution than what is represented in the model, creating the challenge of how to manage their relative scales when performing evaluation. In other cases, numerical models may neglect or simplify system processes because sufficient data is not available or the physical mechanisms are not fully known. If sufficient agreement between prediction and observation is not achieved, it is challenging to know whether these types of modeling choices are the cause, or if other issues, such as deficiencies in the input parameters and/or other modeling assumptions, are the true cause of errors. Even if there is high agreement between prediction and observation, the model cannot necessarily be considered validated, as it is always possible that the right values were produced for the wrong reasons. For example, low error can stem from a situation where different errors in underlying assumptions or parameters cancel each other out (“compensatory errors”). Furthermore, coupled human-natural system models are often subject to “equifinality”, a situation where multiple parameterized formulations can produce similar outputs or equally acceptable representations of the observed data. There is therefore no uniquely “true” or validated model, and the common practice of selecting “the best” deterministic calibration set is more of an assumption than a finding [17, 18]. The situation becomes even more tenuous when observational data is too limited in scope and/or quality to distinguish between model representations or their performance differences.
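A deliberately contrived numpy sketch (written for illustration here, not drawn from any IM3 model or the cited studies) can make compensatory errors and equifinality tangible: two different parameter sets of a toy linear model reproduce the same synthetic observations almost equally well, so a single error metric cannot identify one "true" parameterization.

import numpy as np

rng = np.random.default_rng(seed=0)
forcing = rng.uniform(0.0, 10.0, size=200)        # synthetic forcing series (e.g., rainfall), invented

# Toy two-parameter model; the structural details are beside the point here.
def toy_model(a, b, forcing):
    return a * forcing + b

# Synthetic "observations": the model with one particular parameter set, plus noise.
observations = toy_model(0.60, 1.00, forcing) + rng.normal(0.0, 0.5, size=forcing.size)

def rmse(simulated, observed):
    return float(np.sqrt(np.mean((simulated - observed) ** 2)))

# Two parameterizations whose individual errors partially cancel (compensatory errors),
# yielding nearly indistinguishable fits to the observations (equifinality).
candidates = {"parameter set A (a=0.60, b=1.00)": (0.60, 1.00),
              "parameter set B (a=0.55, b=1.25)": (0.55, 1.25)}
for label, (a, b) in candidates.items():
    print(label, "-> RMSE:", round(rmse(toy_model(a, b, forcing), observations), 3))

With noisy, limited observations, both parameter sets would likely pass a single goodness-of-fit test, which is precisely why selecting "the best" deterministic calibration set is more of an assumption than a finding.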

+

These limitations on model verification undermine any purely positivist treatment of model validity: that a model should correctly and precisely represent reality to be valid. Under this perspective, closely related to empiricism, statistical tests should be used to compare the model’s output with observations, and only through empirical verification can a model or theory be deemed credible. A criticism of this viewpoint (besides the aforementioned challenges for model verification) is that it reduces the justification of a model to the single criterion of predictive ability and accuracy [19]. Authors have argued that this ignores the explanatory power held in models and other procedures, which can also advance scientific knowledge [20]. These views gave rise to relativist perspectives on science, which instead place more value on model utility in terms of fitness for a specific purpose or inquiry, rather than representational accuracy and predictive ability [21]. This viewpoint appears to be most prevalent among practitioners seeking decision-relevant insights (i.e., to inspire new views rather than to predict future conditions). The relativist perspective argues for the use of models as heuristics that can enhance our understanding and conceptions of system behaviors or possibilities [22]. In contrast, the natural sciences favor a positivist perspective, emphasizing similarity between simulation and observation even in application contexts where it is clear that projections are being made for conditions that have never been observed and the system of focus will have evolved structurally beyond the model representation being employed (e.g., decadal to centennial evolution of human-natural systems).

+

These differences in prevalent perspectives are mirrored in how model validation is defined by the two camps: From the relativist perspective, validation is seen as a process of incremental “confidence building” in a model as a mechanism for insight [23], whereas in natural sciences validation is framed as a way to classify a model as having an acceptable representation of physical reality [16]. Even though the relativist viewpoint does not dismiss the importance of representational accuracy, it does place it within a larger process of establishing confidence through a variety of tools. These tools, not necessarily quantitative, include communicating information between practitioners and modelers, interpreting a multitude of model outputs, and contrasting preferences and viewpoints.

+

On the technical side of the argument, differing views on the methodology of model validation appear as early as in the 1960’s. Naylor and Finger [24] argue that model validation should not be limited to a single metric or test of performance (e.g., a single error metric), but should rather be extended to multiple tests that reflect different aspects of a model’s structure and behavior. This and similar arguments are made in literature to this day [12, 25, 26, 27, 28] and are primarily founded on two premises. First, that even though modelers widely recognize that their models are abstractions of the truth, they still make truth claims based on traditional performance metrics that measure the divergence of their model from observation [28]. Second, that the natural systems mimicked by the models contain many processes that exhibit significant heterogeneity at various temporal and spatial scales. This heterogeneity is lost when a single performance measure is used, as a result of the inherent loss of process information occurring when transitioning from a highly dimensional and interactive system to the dimension of a single metric [15]. These arguments are further elaborated in Chapter 4.

+

Multiple authors have proposed that the traditional reliance on single measures of model performance should be replaced by the evaluation of several model signatures (characteristics) to identify model structural errors and achieve a sufficient assessment of model performance [12, 29, 30, 31]. There is however a point of departure here, especially when models are used to produce inferences that can inform decisions. When agencies and practitioners use models of their systems for public decisions, those models have already met sufficient conditions for credibility (e.g., acceptable representational fidelity), but may face broader tests on their salience and legitimacy in informing negotiated decisions [22, 32, 33]. This presents a new challenge to model validation, that of selecting decision-relevant performance metrics, reflective of the system’s stakeholders’ viewpoints, so that the most consequential uncertainties are identified and addressed [34]. For complex multisector models at the intersection of climatic, hydrologic, agricultural, energy, or other processes, the output space is made up of a multitude of states and variables, with very different levels of salience to the system’s stakeholders and to their goals being achieved [35]. This is further complicated when such systems are also institutionally and dynamically complex. As a result, a broader set of qualitative and quantitative performance metrics is necessary to evaluate models of such complex systems, one that embraces the plurality of value systems, agencies and perspectives present. For IM3, even though the goal is to develop better projections of future vulnerability and resilience in co-evolving human-natural systems and not to provide decision support per se, it is critical for our multisector, multiscale model evaluation processes to represent stakeholders’ adaptive decision processes credibly.

+

As a final point, when a model is used in a projection mode, its results are also subject to additional uncertainty, as there is no guarantee that the model’s functionality and predictive ability will stay the same as the baseline, where the verification and validation tests were conducted. This challenge requires an additional expansion of the scope of model evaluation: a broader set of uncertain conditions needs to be explored, spanning beyond historical observation and exploring a wide range of unprecedented conditions. This perspective on modeling, termed exploratory [36], views models as computational experiments that can be used to explore vast ensembles of potential scenarios to identify those with consequential effects. Exploratory modeling literature explicitly orients experiments toward stakeholder consequences and decision-relevant inferences and shifts the focus from predicting future conditions to discovering which conditions lead to undesirable or desirable consequences.

+

This evolution in modeling perspectives can be mirrored by the IM3 family of models in a progression from evaluating models relative to observed history to advanced formalized analyses to make inferences on multisector, multiscale vulnerabilities and resilience. Exploratory modeling approaches can help fashion experiments with large numbers of alternative hypotheses on the co-evolutionary dynamics of influences, stressors, as well as path-dependent changes in the form and function of human-natural systems [37]. The aim of this text is to therefore guide the reader through the use of sensitivity analysis (SA) methods across these perspectives on diagnostic and exploratory modeling.

diff --git a/dev/docs/html/2_diagnostic_modeling_overview_and_perspectives.html b/dev/docs/html/2_diagnostic_modeling_overview_and_perspectives.html
2. Diagnostic Modeling Overview and Perspectives#

+

This text prescribes a formal model diagnostic approach that is a deliberative and iterative combination of state-of-the-art UC and global sensitivity analysis techniques, progressing from observed history-based fidelity evaluations to forward-looking resilience and vulnerability inferences [12, 13].

+
+

2.1. Overview of model diagnostics#

+

Model diagnostics provide a rich basis for hypothesis testing, model innovation, and improved inferences when classifying what is controlling highly consequential results (e.g., vulnerability or resilience in coupled human-natural systems). Fig. 2.1, adapted from [6], presents idealized illustrations of the relationship between UC and global sensitivity analysis for two coupled simulation models. The figure illustrates how uncertainties in various modeling decisions (e.g., data inputs, parameters, model structures, coupling relationships) can be sampled and simulated to yield the empirical model output distribution(s) of interest. Monte Carlo frameworks allow us to sample and propagate (or integrate) the ensemble response of the model(s) of focus. The first step of any UC analysis is the specification of the initial input distributions as illustrated in Fig. 2.1. The second step is to perform the Monte Carlo simulations. The question then arises: which of the modeling assumptions in our Monte Carlo experiment are most responsible for the resulting output uncertainty? We can answer this question using global sensitivity analysis as illustrated in Fig. 2.1. Global sensitivity analysis can be defined as a formal Monte Carlo sampling and analysis of modeling choices (structures, parameters, inputs) to quantify their influence on direct model outputs (or output-informed metrics). UC experiments by themselves do not explain why a particular uncertain outcome is produced, but produce distributions of model outcomes, as portrayed by the yellow curve. The pie chart shown in Fig. 2.1 is a conceptual representation of the results of using a global sensitivity analysis to identify those factors that most strongly influence results, either individually or interactively [14].
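To make the two steps above concrete, the following minimal sketch specifies input distributions, propagates a Monte Carlo sample, and summarizes the resulting empirical output distribution. The "coupled model", factor names, and distributions are purely hypothetical stand-ins; a global sensitivity analysis would then apportion the output variance among the sampled factors.

```python
import numpy as np

rng = np.random.default_rng(seed=42)
n_samples = 10_000

# Step 1: specify the initial input distributions (hypothetical choices for illustration)
recharge = rng.lognormal(mean=0.0, sigma=0.3, size=n_samples)      # e.g., a natural-system input
demand_growth = rng.normal(loc=0.02, scale=0.005, size=n_samples)  # e.g., a human-system parameter

def coupled_model(recharge, demand_growth, horizon=30):
    """Stand-in for two coupled simulation models; returns a metric of interest."""
    supply = 100.0 * recharge
    demand = 80.0 * (1.0 + demand_growth) ** horizon
    return supply - demand  # end-of-horizon surplus (positive) or deficit (negative)

# Step 2: perform the Monte Carlo simulations to obtain the empirical output distribution
surplus = coupled_model(recharge, demand_growth)
print(f"mean = {surplus.mean():.2f}, std = {surplus.std():.2f}, "
      f"P(deficit) = {(surplus < 0).mean():.3f}")
```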


Fig. 2.1 Idealized uncertainty characterization and global sensitivity analysis for two coupled simulation models. Uncertainty coming from various sources (e.g., inputs, model structures, coupling relationships) is propagated through the coupled model(s) to generate empirical distributions of outputs of interest (uncertainty characterization). This model output uncertainty can be decomposed to its origins, by means of sensitivity analysis. Figure adapted from Saltelli et al. [6].#

+
+
+

UC and global sensitivity analysis are not independent modeling analyses. As illustrated here, any global sensitivity analysis requires an initial UC hypothesis in the form of statistical assumptions and representations for the modeling choices of focus (structural, parametric, and data inputs). Information from these two model diagnostic tools can then be used to inform data needs for future model runs, experiments to reduce the uncertainty present, or the simplification or enhancement of the model where necessary. Together UC and global sensitivity analysis provide a foundation for diagnostic exploratory modeling that has a consistent focus on the assumptions, structural model forms, alternative parameterizations, and input data sets that are used to characterize the behavioral space of one or more models.

+
+
+

2.2. Perspectives on diagnostic model evaluation#

+

When we judge or diagnose models, the terms “verification” and “validation” are commonly used. However, their appropriateness in the context of numerical models representing complex coupled human-natural systems is questionable [15, 16]. The core issue is that these systems are often not fully known or perfectly implemented when modeled. Rather, they are defined within specific system framings and boundary conditions in an evolving learning process, with the goal of making continual progress towards higher levels of fidelity in capturing the behaviors or properties of interest. Evaluating the fidelity of a model’s performance can be highly challenging. For example, the observations used to evaluate the fidelity of parameterized processes are often measured at a finer resolution than what is represented in the model, creating the challenge of how to reconcile their relative scales during evaluation. In other cases, numerical models may neglect or simplify system processes because sufficient data is not available or the physical mechanisms are not fully known. If sufficient agreement between prediction and observation is not achieved, it is difficult to know whether these modeling choices are the cause, or whether other issues, such as deficiencies in the input parameters and/or other modeling assumptions, are the true source of error. Even if there is high agreement between prediction and observation, the model cannot necessarily be considered validated, as it is always possible that the right values were produced for the wrong reasons. For example, low error can arise when different errors in underlying assumptions or parameters cancel each other out (“compensatory errors”). Furthermore, coupled human-natural system models are often subject to “equifinality”, a situation where multiple parameterized formulations produce similar outputs or equally acceptable representations of the observed data. There is therefore no uniquely “true” or validated model, and the common practice of selecting “the best” deterministic calibration set is more of an assumption than a finding [17, 18]. The situation becomes even more tenuous when observational data is too limited in scope and/or quality to distinguish between model representations or their performance differences.

+

These limitations on model verification undermine any purely positivist treatment of model validity: the notion that a model must correctly and precisely represent reality to be valid. Under this perspective, closely related to empiricism, statistical tests should be used to compare the model’s output with observations, and only through empirical verification can a model or theory be deemed credible. A criticism of this viewpoint (besides the aforementioned challenges for model verification) is that it reduces the justification of a model to the single criterion of predictive ability and accuracy [19]. Authors have argued that this ignores the explanatory power held in models and other procedures, which can also advance scientific knowledge [20]. These views gave rise to relativist perspectives of science, which instead place more value on a model’s utility in terms of fitness for a specific purpose or inquiry, rather than on representational accuracy and predictive ability [21]. This viewpoint appears to be most prevalent among practitioners seeking decision-relevant insights (i.e., models that inspire new views rather than predict future conditions). The relativist perspective argues for the use of models as heuristics that can enhance our understanding and conceptions of system behaviors or possibilities [22]. In contrast, the natural sciences favor a positivist perspective, emphasizing similarity between simulation and observation even in application contexts where it is clear that projections are being made for conditions that have never been observed and the system of focus will have evolved structurally beyond the model representation being employed (e.g., decadal to centennial evolution of human-natural systems).

+

These differences in prevalent perspectives are mirrored in how model validation is defined by the two camps: From the relativist perspective, validation is seen as a process of incremental “confidence building” in a model as a mechanism for insight [23], whereas in natural sciences validation is framed as a way to classify a model as having an acceptable representation of physical reality [16]. Even though the relativist viewpoint does not dismiss the importance of representational accuracy, it does place it within a larger process of establishing confidence through a variety of tools. These tools, not necessarily quantitative, include communicating information between practitioners and modelers, interpreting a multitude of model outputs, and contrasting preferences and viewpoints.

+

On the technical side of the argument, differing views on the methodology of model validation appear as early as the 1960s. Naylor and Finger [24] argue that model validation should not be limited to a single metric or test of performance (e.g., a single error metric), but should rather be extended to multiple tests that reflect different aspects of a model’s structure and behavior. This and similar arguments are made in the literature to this day [12, 25, 26, 27, 28] and are primarily founded on two premises. First, even though modelers widely recognize that their models are abstractions of the truth, they still make truth claims based on traditional performance metrics that measure the divergence of their model from observation [28]. Second, the natural systems mimicked by the models contain many processes that exhibit significant heterogeneity at various temporal and spatial scales. This heterogeneity is lost when a single performance measure is used, as a result of the inherent loss of process information that occurs when transitioning from a high-dimensional, interactive system to a single metric [15]. These arguments are further elaborated in Chapter 4.

+

Multiple authors have proposed that the traditional reliance on single measures of model performance should be replaced by the evaluation of several model signatures (characteristics) to identify model structural errors and achieve a sufficient assessment of model performance [12, 29, 30, 31]. There is, however, a point of departure here, especially when models are used to produce inferences that can inform decisions. When agencies and practitioners use models of their systems for public decisions, those models have already met sufficient conditions for credibility (e.g., acceptable representational fidelity), but may face broader tests of their salience and legitimacy in informing negotiated decisions [22, 32, 33]. This presents a new challenge for model validation: selecting decision-relevant performance metrics, reflective of the viewpoints of the system’s stakeholders, so that the most consequential uncertainties are identified and addressed [34]. For complex multisector models at the intersection of climatic, hydrologic, agricultural, energy, or other processes, the output space is made up of a multitude of states and variables, with very different levels of salience to the system’s stakeholders and to the attainment of their goals [35]. This is further complicated when such systems are also institutionally and dynamically complex. As a result, a broader set of qualitative and quantitative performance metrics is necessary to evaluate models of such complex systems, one that embraces the plurality of value systems, agencies, and perspectives present. For IM3, even though the goal is to develop better projections of future vulnerability and resilience in co-evolving human-natural systems and not to provide decision support per se, it is critical for our multisector, multiscale model evaluation processes to represent stakeholders’ adaptive decision processes credibly.

+

As a final point, when a model is used in a projection mode, its results are also subject to additional uncertainty, as there is no guarantee that the model’s functionality and predictive ability will remain the same as under the baseline conditions where the verification and validation tests were conducted. This challenge requires an additional expansion of the scope of model evaluation: a broader set of uncertain conditions needs to be explored, spanning beyond historical observation to a wide range of unprecedented conditions. This perspective on modeling, termed exploratory [36], views models as computational experiments that can be used to explore vast ensembles of potential scenarios and identify those with consequential effects. The exploratory modeling literature explicitly orients experiments toward stakeholder consequences and decision-relevant inferences and shifts the focus from predicting future conditions to discovering which conditions lead to undesirable or desirable consequences.

+

This evolution in modeling perspectives can be mirrored by the IM3 family of models in a progression from evaluating models relative to observed history to advanced, formalized analyses that make inferences on multisector, multiscale vulnerabilities and resilience. Exploratory modeling approaches can help fashion experiments with large numbers of alternative hypotheses on the co-evolutionary dynamics of influences and stressors, as well as path-dependent changes in the form and function of human-natural systems [37]. The aim of this text is therefore to guide the reader through the use of sensitivity analysis (SA) methods across these perspectives on diagnostic and exploratory modeling.

+
+

Note

+

The following articles are suggested as fundamental reading for the information presented in this section:

  • Naomi Oreskes, Kristin Shrader-Frechette, and Kenneth Belitz. Verification, Validation, and Confirmation of Numerical Models in the Earth Sciences. Science, 263(5147):641-646, February 1994. URL: https://science.sciencemag.org/content/263/5147/641. DOI: https://doi.org/10.1126/science.263.5147.641.

  • Keith Beven. Towards a coherent philosophy for modelling the environment. Proceedings of the Royal Society of London. Series A: Mathematical, Physical and Engineering Sciences, 458(2026):2465-2484, 2002.

  • Sibel Eker, Elena Rovenskaya, Michael Obersteiner, and Simon Langan. Practice and perspectives in the validation of resource management models. Nature Communications, 9:1-10, 2018. DOI: https://doi.org/10.1038/s41467-018-07811-9.

The following articles can be used as supplemental reading:

diff --git a/dev/docs/html/3.1_global_versus_local_sensitivity.html b/dev/docs/html/3.1_global_versus_local_sensitivity.html

Global Versus Local Sensitivity#

+

Out of the several definitions for sensitivity analysis presented in the literature, the most widely used has been proposed by Saltelli et al. [38] as “the study of how uncertainty in the output of a model (numerical or otherwise) can be apportioned to different sources of uncertainty in the model input”. In other words, sensitivity analysis explores the relationship between the model’s \(N\) input variables, \(x=[x_1,x_2,...,x_N]\), and \(M\) output variables, \(y=[y_1,y_2,...,y_M]\) with \(y=g(x)\), where \(g\) is the model that maps the model inputs to the outputs [39].

+

Historically, there have been two broad categories of sensitivity analysis techniques: local and global. Local sensitivity analysis is performed by varying model parameters around specific reference values, with the goal of exploring how small input perturbations influence model performance. Due to its ease of use and limited computational demands, this approach has been widely used in the literature, but it has important limitations [40, 41]. If the model is not linear, the results of local sensitivity analysis can be heavily biased, as they are strongly influenced by independence assumptions and a limited exploration of model inputs (e.g., Tang et al. [42]). If the model’s factors interact, local sensitivity analysis will underestimate their importance, as it does not account for those effects (e.g., [43]). In general, as local sensitivity analysis only partially and locally explores a model’s parametric space, it is not considered a valid approach for nonlinear models [44]. This is illustrated in Fig. 3.1 (a-b), presenting contour plots of a model response (\(y\)) for an additive linear model (a) and for a nonlinear model (b). In a linear model without interactions between the input terms \(x_1\) and \(x_2\), local sensitivity analysis (assuming deviations from some reference values) can produce appropriate sensitivity indices (Fig. 3.1 (a)). If, however, factors \(x_1\) and \(x_2\) interact, the local and partial consideration of the space cannot properly account for each factor’s effects on the model response (Fig. 3.1 (b)), as it is only informative at the reference value where it is applied. In contrast, a global sensitivity analysis varies uncertain factors within the entire feasible space of variable model responses (Fig. 3.1 (c)). This approach reveals the global effects of each parameter on the model output, including any interactive effects. For models that cannot be proven linear, global sensitivity analysis is preferred, and this text primarily discusses global sensitivity analysis methods. In the text that follows, whenever we use the term sensitivity analysis we are referring to its global application.
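As a simple illustration of the difference, the sketch below (using the nonlinear, interactive response surface of Fig. 3.1 (b-c) and an arbitrary reference point) contrasts a local, derivative-based measure computed at a single point with a crude global view obtained by sampling the full input space; the local derivatives change entirely if a different reference point is chosen, whereas the global sample reflects behavior across the whole domain.

```python
import numpy as np

def g(x1, x2):
    # Nonlinear, interactive test function (as in Fig. 3.1 (b-c))
    return np.exp(-(x1**2 + x2**2)) + 50.0 * np.exp(-((0.1 * x1) ** 2 + (0.1 * x2) ** 3))

# Local sensitivity: central finite differences around an (arbitrary) reference point
x_ref, h = np.array([1.0, 1.0]), 1e-4
local = []
for i in range(2):
    dx = np.zeros(2)
    dx[i] = h
    local.append((g(*(x_ref + dx)) - g(*(x_ref - dx))) / (2.0 * h))
print("local derivatives at x_ref:", local)

# Global view: sample the entire feasible space and examine output variability
rng = np.random.default_rng(1)
X = rng.uniform(-3.0, 3.0, size=(5000, 2))
Y = g(X[:, 0], X[:, 1])
print("output variance over the full space:", Y.var())
```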


Fig. 3.1 Treatment of a two-dimensional space of variability by local (panels a-b) and global (panel c) sensitivity analyses. Panels depict contour plots with the value of a model response (\(y\)) changing with the values of input terms \(x_1\) and \(x_2\). Local sensitivity analysis is only an appropriate approach in the case of linear models without interactions between terms, for example in panel (a), where \(y=3x_1+5x_2\). In the case of more complex models, for example in panels (b-c), where \(y=\frac{1}{e^{x_1^2+x_2^2}} + \frac{50}{e^{(0.1x_1)^2+(0.1x_2)^3}}\), local sensitivity analysis will miscalculate sensitivity indices, as the assessed changes in the value of \(y\) depend on the base values chosen for \(x_1\) and \(x_2\) (panel (b)). In these cases, global sensitivity methods should be used instead (panel (c)). The points in panel (c) are generated using a uniform random sample of \(n=50\), but many other methods are available.#

diff --git a/dev/docs/html/3.2_why_perform_sensitivity_analysis.html b/dev/docs/html/3.2_why_perform_sensitivity_analysis.html

Why Perform Sensitivity Analysis#

+

It is important to understand the many ways in which a sensitivity analysis might be of use to your modeling effort. Most commonly, one might be motivated to perform sensitivity analysis for the following reasons:

+

Model evaluation: Sensitivity analysis can be used to gauge model inferences when assumptions about the structure of the model or its parameterization are dubious or have changed. For instance, consider a numerical model that uses a set of calibrated parameter values to produce outputs, which we then use to inform decisions about the real-world system represented. One might like to know whether small changes in these parameter values significantly change the model’s output and the decisions it informs or if, instead, our parameter inferences yield stable model behavior regardless of the uncertainty present in the specific parameterized processes or properties. This can either discredit or lend credence to the model at hand, as well as to any inferences drawn that are founded on its accurate representation of the system. Sensitivity analysis can identify which uncertain model factors are responsible for such unstable model behavior.

+

Model simplification: Sensitivity analysis can also be used to identify factors or components of the model that appear to have limited effects on direct outputs or metrics of interest. Consider a model that has been developed in an organization for the purposes of a specific research question and is later used in the context of a different application. Processes represented in significant detail for the original purpose might no longer be as important for the outputs pertinent to the new application, while still consuming significant data or computational resources. Sensitivity analysis can be used to identify unimportant model components and simplify them to nominal values and reduced model forms. Model complexity and computational costs can therefore be reduced.

+

Model refinement: Alternatively, sensitivity analysis can reveal the factors or processes that are highly influential to the outputs or metrics of interest, by assessing their relative importance. In the context of model evaluation, this can inform which model components warrant additional investigation or measurement so the uncertainty surrounding them and the resulting model outputs or metrics of interest can be reduced.

+

Exploratory modeling: When sufficient credence has been established in the model, sensitivity analysis can be applied to a host of other inquiries. Inferences about the factors and processes that most (or least) control a model’s outputs of interest can be extrapolated to the real system they represent and be used in a heuristic manner to inform model-based inferences. On this foundation, a model paired with the advanced techniques presented in this text can be used to “discover” decision relevant and highly consequential outcomes (i.e., scenario discovery, discussed in more detail in Chapter 4.3 [36, 45]).

+

The nature and context of the model shape the specific objectives of applying a sensitivity analysis, as well as the methods and tools most appropriate and defensible for each application setting [35, 38, 46]. The three most common sensitivity analysis modes (Factor Prioritization, Factor Fixing, and Factor Mapping) are presented below, but the reader should be aware that other uses have been proposed in the literature (e.g., [47, 48]).

+

Factor prioritization: This sensitivity analysis application mode (also referred to as factor ranking) is used when one would like to identify the uncertain factors that have the greatest impact on the variability of the output, and which, when fixed to their true value (i.e., if there were no uncertainty regarding their value), would lead to the greatest reduction in output variability [49]. Information from this type of analysis can be crucial to model improvement, as these factors can become the focus of future measurement campaigns or numerical experiments so that uncertainty in the model output can be reduced. The impact of each uncertain input on the variance of the model output is often used as the criterion for factor prioritization. Fig. 3.2 (a) shows the effects of three uncertain variables (\(X_1\), \(X_2\), and \(X_3\)) on the variance of output \(Y\). \(V(E(Y|X_i))\) denotes the variance, across the range of factor \(X_i\), of the expected value of \(Y\) conditional on \(X_i\), i.e., the portion of the output variance attributable to \(X_i\) on its own. In this case, factor \(X_2\) makes the largest contribution to the variability of output \(Y\) and should therefore be prioritized. In the context of risk analysis, factor prioritization can be used to reduce output variance to below a given tolerable threshold (also known as variance cutting).

+

Factor fixing: This mode of sensitivity analysis (also referred to as factor screening) aims to identify the model components that have a negligible effect or make no significant contributions to the variability of the outputs or metrics of interest (usually referred to as non-influential [49]). In the stylized example of Fig. 3.2 (a), \(X_1\) makes the smallest contribution to the variability of output \(Y\), suggesting that the uncertainty in its value could be considered negligible and the factor itself fixed in subsequent model executions. Eliminating these factors or processes in the model, or fixing them to a nominal value, can help reduce model complexity as well as the unnecessary computational burden of subsequent model runs, results processing, or other sensitivity analyses (the fewer uncertain factors considered, the fewer runs are necessary to illuminate their effects on the output). The significance of a factor’s contribution can be gauged in a variety of ways, depending on the application. For instance, if applying a variance-based method, a minimum threshold of contribution to the variance could be used as a significance ‘cutoff’, and factors with indices below that value can be considered non-influential. Conclusions about factor fixing should be made carefully, considering all of the effects a factor has, individually and in interaction with other factors (explained in more detail in Chapter 3.4.5).
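A hedged sketch of how these two modes are often operationalized with variance-based indices is shown below, assuming SALib is available and using a hypothetical three-factor model: first-order indices S1 (estimates of \(V(E(Y|X_i))/V(Y)\)) support factor prioritization, while total-order indices ST near zero flag candidates for factor fixing.

```python
import numpy as np
from SALib.sample import saltelli
from SALib.analyze import sobol

# Hypothetical three-factor uncertainty space
problem = {
    "num_vars": 3,
    "names": ["X1", "X2", "X3"],
    "bounds": [[0.0, 1.0], [0.0, 1.0], [0.0, 1.0]],
}

def model(X):
    # Toy model: X2 dominates, X1 is nearly non-influential
    return 0.1 * X[:, 0] + 5.0 * X[:, 1] ** 2 + X[:, 2] + X[:, 1] * X[:, 2]

X = saltelli.sample(problem, 1024)   # Sobol'-sequence-based design for Sobol indices
Y = model(X)
Si = sobol.analyze(problem, Y)

print("First-order indices (prioritization):", Si["S1"])
print("Total-order indices (fixing candidates where ST ~ 0):", Si["ST"])
```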

+

Factor mapping: Finally, factor mapping can be used to pinpoint which values of uncertain factors lead to model outputs within a given range of the output space [49]. In the context of model diagnostics, it is possible that the model’s output changes in ways considered impossible based on the represented processes, or other observed evidence. In this situation, factor mapping can be used to identify which uncertain model factors cause this undesirable model behavior by ‘filtering’ model runs that are considered ‘non-behavioral’ [50, 51, 52]. In Fig. 3.2 (b), region \(B\) of the output space \(Y\) denotes the set of behavioral model outcomes and region \(\bar{B}\) denotes the set of non-behavioral outcomes, resulting from the entirety of input space \(X\). Factor mapping refers to the process of tracing which factor values of input space \(X\) produce the behavioral model outcomes in the output space.
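A minimal factor mapping sketch is given below, with a hypothetical model and an arbitrary threshold defining the behavioral region \(B\): runs are classified as behavioral or non-behavioral, and the input samples producing each class are compared to trace which factor ranges generate the outcomes of interest. More formal classification approaches used in scenario discovery (e.g., PRIM or CART) build on the same idea.

```python
import numpy as np

rng = np.random.default_rng(7)
X = rng.uniform(0.0, 1.0, size=(5000, 3))   # samples of three uncertain factors

def model(X):
    # Hypothetical response: large X1 combined with large X3 drives extreme outputs
    return X[:, 0] * X[:, 2] + 0.2 * X[:, 1]

Y = model(X)
behavioral = Y <= 0.6                        # region B: acceptable outcomes
print("fraction behavioral:", behavioral.mean())

# Compare factor ranges between behavioral and non-behavioral runs
for i, name in enumerate(["X1", "X2", "X3"]):
    lo, hi = X[~behavioral, i].min(), X[~behavioral, i].max()
    print(f"{name}: non-behavioral runs span [{lo:.2f}, {hi:.2f}]")
```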


Fig. 3.2 Factor prioritization, factor fixing, and factor mapping settings of sensitivity analysis.#

+
+
+

The language used above reflects a use of sensitivity analysis for model fidelity evaluation and refinement. However, as previously mentioned, when a model has been established as a sufficiently accurate representation of the system, sensitivity analysis can produce additional inferences (i.e., exploratory modeling and scenario discovery). For example, under the factor mapping use, the analyst can focus on undesirable system states and discover which factors are most responsible for them: for instance, “population growth of above 25% would be responsible for unacceptably high energy demands”. Factor prioritization and factor fixing can be used to make equivalent inferences, such as “growing populations and increasing temperatures are the leading factors for changing energy demands” (factor prioritization) or “changing dietary needs are inconsequential to increasing energy demands for this region” (a factor that can be fixed in subsequent model runs). All these inferences hinge on the assumption that the real system’s stakeholders consider the model states faithful enough representations of system states. As elaborated in Chapter 2.2, this view of sensitivity analysis is founded on a relativist perspective on modeling, which tends to place more value on model usefulness than on strict accuracy of representation in terms of error. As such, sensitivity analysis performed with decision-making relevance in mind will focus on model outputs or metrics that are consequential and decision relevant (e.g., energy demand in the examples above).

diff --git a/dev/docs/html/3.3.1_one_at_a_time_oat.html b/dev/docs/html/3.3.1_one_at_a_time_oat.html

One-At-a-Time (OAT)#

+

In this approach, only one model factor is changed at a time while all others are kept fixed across each iteration in a sampling sequence. The OAT method assumes that the model factors of focus are linearly independent (i.e., there are no interactions) and can analyze how factors individually influence model outputs or metrics of interest. While popular given its ease of implementation, OAT is ultimately limited in its exploration of a model’s sensitivities [49]. It is primarily used with local sensitivity techniques and shares their criticisms: applying this sampling scheme to a system with nonlinear and interactive processes will miss important information on the effects uncertain factors have on the model. OAT sampling can be repeated in a more sophisticated manner across different locations of the parameter space to overcome some of these challenges, but doing so increases computational costs and negates the main reasons for selecting OAT in the first place. Given these limitations, OAT methods can be useful as preliminary, low-cost analyses of the factors’ individual effects, but should ultimately be complemented with more sophisticated methods.
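The sketch below shows the basic OAT pattern with a hypothetical three-factor model: each factor is perturbed in turn about a fixed baseline while the others stay at their nominal values, so any interactive effects are, by construction, invisible.

```python
import numpy as np

def model(x):
    # Hypothetical model with an interaction that OAT cannot detect
    return x[0] + 2.0 * x[1] + 10.0 * x[1] * x[2]

baseline = np.array([0.5, 0.5, 0.5])   # nominal values of the three factors
delta = 0.1                            # fixed perturbation size
y0 = model(baseline)

for i in range(len(baseline)):
    x = baseline.copy()
    x[i] += delta                      # change one factor at a time
    print(f"factor {i}: change in output = {model(x) - y0:+.3f}")
```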

diff --git a/dev/docs/html/3.3.2_full_fractional_factorial_sampling.html b/dev/docs/html/3.3.2_full_fractional_factorial_sampling.html

Full and Fractional Factorial Sampling#

+

In full factorial sampling each factor is treated as being discrete by considering two or more levels (or intervals) of its values. The sampling process then generates samples within each possible combination of levels, corresponding to each parameter. This scheme produces a more comprehensive sampling of the factors’ variability space, as it accounts for all candidate combinations of factor levels (Fig. 3.3 (a)). If the number of levels is the same across all factors, the number of generated samples is estimated using \(n^k\), where \(n\) is the number of levels and \(k\) is the number of factors. For example, Fig. 3.3 (a) presents a full factorial sampling of three uncertain factors \((x_1,\) \(x_2,\) and \(x_3)\), each considered as having four discrete levels. The total number of samples necessary for such an experiment is \(4^3=64\). As the number of factors increases, the number of simulations necessary will also grow exponentially, making full factorial sampling computationally burdensome (Fig. 3.3 (b)). As a result, it is common in the literature to apply full factorial sampling at only two levels per factor, typically the two extremes [60]. This significantly reduces computational burden but is only considered appropriate in cases where factors can indeed only assume two discrete values (e.g., when testing the effects of epistemic uncertainty and comparing between model structure A and model structure B). In the case of physical parameters on continuous distributions (e.g., when considering the effects of measurement uncertainty in a temperature sensor), discretizing the range of a factor to only extreme levels can bias its estimated importance.
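A minimal full factorial design for the example above (three hypothetical factors at four levels each, giving \(4^3=64\) samples) can be generated as follows; the factor names and ranges are placeholders.

```python
import itertools
import numpy as np

# Four equally spaced levels per factor (hypothetical ranges)
levels = {
    "x1": np.linspace(0.0, 1.0, 4),
    "x2": np.linspace(10.0, 40.0, 4),
    "x3": np.linspace(-1.0, 1.0, 4),
}

# Every combination of levels: 4 ** 3 = 64 rows, one per model run
design = np.array(list(itertools.product(*levels.values())))
print(design.shape)  # (64, 3)
```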

+

Fractional factorial sampling is a widely used alternative to full factorial sampling that allows the analyst to significantly reduce the number of simulations by focusing on the main effects of a factor and seeking to avoid model runs that yield redundant response information [49]. In other words, if one can reasonably assume that higher-order interactions are negligible, information about the most significant effects and lower-order interactions (e.g., effects from pairs of factors) can be obtained using a fraction of the full factorial design. Traditionally, fractional factorial design has also been limited to two levels [60], referred to as Fractional Factorial \(2^{k-p}\) designs [61]. Recently, Generalized Fractional Factorial designs have also been proposed that allow for the structured generation of samples at more than two levels per factor [62]. Consider a case where the modeling team dealing with the problem in Fig. 3.3 (a) cannot afford to perform 64 simulations of their model. They can afford 32 runs for their experiment and instead decide to fractionally sample the variability space of their factors. A potential design of such a sampling strategy is presented in Fig. 3.3 (c).


Fig. 3.3 Alternative designs of experiments and their computational costs for three uncertain factors \((x_1,\) \(x_2,\) and \(x_3)\). (a) Full factorial design sampling of three factors at four levels, at a total of 64 samples; (b) exponential growth of necessary number of samples when applying full factorial design at four levels; (c) fractional factorial design of three factors at four levels, at a total of 32 samples; and (d) Latin Hypercube sample of three factors with uniform distributions, at a total of 32 samples.#

diff --git a/dev/docs/html/3.3.3_latin_hypercube_sampling.html b/dev/docs/html/3.3.3_latin_hypercube_sampling.html

Latin Hypercube Sampling (LHS)#

+

Latin hypercube sampling (LHS) [63] is one of the most common methods in space-filling experimental design. With this sampling technique, for \(N\) uncertain factors, an \(N\)-dimensional hypercube is generated, with each factor divided into an equal number of levels depending on the total number of samples to be generated. Equal numbers of samples are then randomly generated at each level, across all factors. In this manner, Latin hypercube design guarantees sampling from every level of the variability space without any overlaps. When the number of samples generated is much larger than the number of uncertain factors, LHS can be very effective in examining the effects of each factor [49]. LHS is an attractive technique because it guarantees diverse coverage of the space, through the use of subintervals, without being constrained to discrete levels for each factor (compare Fig. 3.3 (c) with Fig. 3.3 (d) for the same number of samples).
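A Latin hypercube design can be generated, for example, with SciPy's quasi-Monte Carlo utilities (assuming scipy >= 1.7 is available); the sketch below draws 32 samples of three hypothetical factors on the unit cube and rescales them to assumed ranges.

```python
from scipy.stats import qmc

sampler = qmc.LatinHypercube(d=3, seed=7)   # one dimension per uncertain factor
unit_sample = sampler.random(n=32)          # 32 points in [0, 1)^3, one per subinterval

# Rescale to the (hypothetical) ranges of the three factors
lower_bounds = [0.0, 10.0, -1.0]
upper_bounds = [1.0, 40.0, 1.0]
design = qmc.scale(unit_sample, lower_bounds, upper_bounds)
print(design.shape)  # (32, 3)
```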

+

LHS is less effective when the number of samples is not much larger than the number of uncertain factors, in which case the effects of each factor cannot be appropriately distinguished. The samples of different factors can also end up highly correlated, biasing any subsequent sensitivity analysis results. To address this, the sampling scheme can be modified to control for correlation between parameters while maximizing the information derived. An example of such a modification is the use of orthogonal arrays [64].

diff --git a/dev/docs/html/3.3.4_low_discrepancy_sequences.html b/dev/docs/html/3.3.4_low_discrepancy_sequences.html

Low-Discrepancy Sequences#

+

Low-discrepancy sequences are another sampling technique, used as a quasi-random alternative to pseudo-random generators in Monte Carlo sampling [65, 66]. These quasi-Monte Carlo methods eliminate ‘lumpiness’ across samples (i.e., the presence of gaps and clusters) by minimizing discrepancy across the hypercube samples. Discrepancy can be quantitatively measured using the deviations of sampled points from a uniform distribution [65, 67]. Low-discrepancy sequences ensure that the number of samples in any subspace of the variability hypercube is approximately the same. This is not something guaranteed by Latin hypercube sampling, and even though its design can be improved through optimization with various criteria, such adjustments are limited to small sample sizes and low dimensions [67, 68, 69, 70, 71]. In contrast, the Sobol sequence [72, 73], one of the most widely used sampling techniques, utilizes the low-discrepancy approach to uniformly fill the sampled factor space. A core advantage of this style of sampling is that far fewer samples (i.e., simulations) are needed to attain a much lower level of error in estimating model output statistics (e.g., the mean and variance of outputs).
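For example, SciPy's qmc module (assumed available) provides a Sobol sequence generator and a discrepancy measure, so the uniformity of a quasi-random design can be compared directly against another sample of the same size:

```python
from scipy.stats import qmc

# 2**5 = 32 points of a (scrambled) Sobol' sequence in three dimensions
sobol_sample = qmc.Sobol(d=3, scramble=True, seed=7).random_base2(m=5)

# 32-point Latin hypercube sample for comparison
lhs_sample = qmc.LatinHypercube(d=3, seed=7).random(n=32)

# Lower discrepancy indicates a more uniform filling of the unit hypercube
print("Sobol' discrepancy:", qmc.discrepancy(sobol_sample))
print("LHS discrepancy:   ", qmc.discrepancy(lhs_sample))
```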

+
+

Note

+

Put this into practice! Click the following link to try out an interactive tutorial which uses Sobol sequence sampling for the purposes of a Sobol sensitivity analysis: Sobol SA using SALib Jupyter Notebook

diff --git a/dev/docs/html/3.3.5_other_types_of_sampling.html b/dev/docs/html/3.3.5_other_types_of_sampling.html

Other types of sampling#

+

The sampling techniques mentioned so far are general sampling methods useful for a variety of applications beyond sensitivity analysis. There are however techniques that have been developed for specific sensitivity analysis methods. Examples of these methods include the Morris One-At-a-Time [74], Fourier Amplitude Sensitivity Test (FAST; [75]), Extended FAST [76], and Extended Sobol methods [77]. For example, the Morris sampling strategy builds a number of trajectories (usually referred to as repetitions and denoted by \(r\)) in the input space each composed of \(N+1\) factor points, where \(N\) is the number of uncertain factors. The first point of the trajectory is selected randomly and the subsequent \(N\) points are generated by moving one factor at a time by a fixed amount. Each factor is perturbed once along the trajectory, while the starting points of all of the trajectories are randomly and uniformly distributed. Several variations of this strategy also exist in the literature; for more details on each approach and their differences the reader is directed to Pianosi et al. [51].
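As an illustration, a Morris (elementary effects) experiment with \(r\) trajectories can be set up with SALib (assuming its current sample/analyze interface) for a hypothetical three-factor model; each trajectory contains \(N+1=4\) points, so the design below requires \(r \times (N+1) = 40\) model runs.

```python
import numpy as np
from SALib.sample import morris as morris_sample
from SALib.analyze import morris as morris_analyze

problem = {
    "num_vars": 3,
    "names": ["x1", "x2", "x3"],
    "bounds": [[0.0, 1.0], [0.0, 1.0], [0.0, 1.0]],
}

def model(X):
    # Hypothetical model used only to demonstrate the workflow
    return X[:, 0] + 4.0 * X[:, 1] ** 2 + X[:, 0] * X[:, 2]

X = morris_sample.sample(problem, N=10, num_levels=4)   # r = 10 trajectories
Y = model(X)
results = morris_analyze.analyze(problem, X, Y, num_levels=4)
print(results["mu_star"])   # mean absolute elementary effect per factor
```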

diff --git a/dev/docs/html/3.3.6_synthetic_generation_of_input_time_series.html b/dev/docs/html/3.3.6_synthetic_generation_of_input_time_series.html

Synthetic generation of input time series#

+

Models often have input time series or processes with strong temporal and/or spatial correlations (e.g., streamflow, energy demand, pricing of commodities, etc.) that, while they might not immediately come to mind as factors to be examined in sensitivity analysis, can be treated as such. Synthetic input time series are used for a variety of reasons, for example, when observations are not available or are limited, or when past observations are not considered sufficiently representative to capture rare or extreme events of interest [78, 79]. Synthetic generation of input time series provides a valuable tool to consider non-stationarity and incorporate potential stressors, such as climate change impacts into input time series [80]. For example, a century of record will be insufficient to capture very high impact rare extreme events (e.g., persistent multi-year droughts). A large body of statistical literature exists focusing on the topics of synthetic weather [81, 82] and streamflow [83, 84] generation that provides a rich suite of approaches for developing history-informed, well-characterized stochastic process models to better estimate rare individual or compound (hot, severe drought) extremes. It is beyond the scope of this text to review these methods, but readers are encouraged to explore the studies cited above as well as the following publications for discussions and comparisons of these methods: [78, 80, 85, 86, 87, 88, 89]. The use of these methods for the purposes of exploratory modeling, especially in the context of well-characterized versus deep uncertainty, is further discussed in Chapter 4.3.
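As one minimal example of the idea (not any of the specific generators cited above), a lag-1 autoregressive model can be fitted to an observed annual series and used to produce many statistically similar synthetic traces, some of which will contain more severe multi-year droughts than the record itself; the "observed" record below is a placeholder, and a real generator would also handle skew and non-negativity.

```python
import numpy as np

rng = np.random.default_rng(3)

# Stand-in for an observed annual streamflow record (replace with real data)
obs = rng.lognormal(mean=3.0, sigma=0.4, size=100)

mu, sigma = obs.mean(), obs.std(ddof=1)
rho = np.corrcoef(obs[:-1], obs[1:])[0, 1]      # lag-1 autocorrelation

n_traces, n_years = 1000, obs.size
synthetic = np.empty((n_traces, n_years))
synthetic[:, 0] = rng.normal(mu, sigma, n_traces)
for t in range(1, n_years):
    shock = rng.normal(0.0, sigma * np.sqrt(1.0 - rho**2), n_traces)
    synthetic[:, t] = mu + rho * (synthetic[:, t - 1] - mu) + shock

# Each row is one synthetic trace preserving the mean, variance, and lag-1 correlation
print(synthetic.shape)
```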

diff --git a/dev/docs/html/3.3_design_of_experiments.html b/dev/docs/html/3.3_design_of_experiments.html

Design of Experiments#

+

Before conducting a sensitivity analysis, the first element that needs to be clarified is the uncertainty space of the model [51, 53]. In other words, one needs to define how many and which of the factors making up the mathematical model are considered uncertain and can potentially affect the model output and the inferences drawn from it. Uncertain factors can be model parameters, model structures, inputs, or alternative model resolution levels (scales), all of which can be assessed through the tools presented in this text. Depending on the kind of factor, its variability can be elicited through various means: expert opinion, values reported in the literature, historical observations, its physical meaning (e.g., population values in a city can never be negative), or through the use of more formal UQ methods (Chapter A). The model uncertainty space represents the entire space of variability present in each of the uncertain factors of a model. The complexity of most real-world models means that the response function, \(y=g(x)\), mapping inputs to outputs, is hardly ever available in an analytical form, and therefore analytically computing the sensitivity of the output to each uncertain factor becomes impossible. In these cases, sensitivity analysis is only feasible through numerical procedures that employ different strategies to sample the uncertainty space and calculate sensitivity indices.
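In practice, the uncertainty space is often written down explicitly before any sampling takes place; the sketch below uses the SALib-style problem dictionary as one possible format, with purely hypothetical factor names and ranges.

```python
# Hypothetical uncertainty space for a coupled human-natural system model:
# each uncertain factor gets a name and a plausible range (here, uniform bounds
# elicited from literature values, observations, or expert opinion).
uncertainty_space = {
    "num_vars": 4,
    "names": [
        "soil_infiltration_rate",   # natural-system parameter
        "population_growth_rate",   # human-system input
        "demand_elasticity",        # behavioral parameter
        "reservoir_capacity_bias",  # structural/measurement uncertainty
    ],
    "bounds": [
        [0.05, 0.25],
        [0.00, 0.03],
        [-0.60, -0.10],
        [0.90, 1.10],
    ],
}
```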

+

A sampling strategy is often referred to as a design of experiments and represents a methodological choice made before conducting any sensitivity analysis. Experimental design was first introduced by Fisher [54] in the context of laboratory or field-based experiments. Its application in sensitivity analysis is similar to setting up a physical experiment in that it is used to discover the behavior of a system under specific conditions. An ideal design of experiments should provide a framework for the extraction of all plausible information about the impact of each factor on the output of the model. The design of experiments is used to set up a simulation platform with the minimum computational cost to answer specific questions that cannot be readily drawn from the data through analytical or common data mining techniques. Models representing coupled human-natural systems usually have a large number of inputs, state variables and parameters, but not all of them exert fundamental control over the numerical process, despite their uncertainty, nor have substantial impacts on the model output, either independently or through their interactions. Each factor influences the model output in different ways that need to be discovered. For example, the influence of a parameter on model output can be linear or non-linear and can be continuous or only be active during specific times or at particular states of the system [55, 56]. An effective and efficient design of experiments allows the analyst to explore these complex relationships and evaluate different behaviors of the model for various scientific questions [57]. The rest of this section overviews some of the most commonly used designs of experiments. Table 1 summarizes the designs discussed.

+
Table 1: Summary of designs of experiments overviewed in this section. * Depends on the sample size.#

Design of experiments                                 | Factor interactions considered  | Treatment of factor domains
One-At-a-Time (OAT)                                   | No - main effects only          | Continuous (distributions)
Full Factorial Sampling                               | Yes - including total effects   | Discrete (levels)
Fractional Factorial Sampling                         | Yes - only lower-order effects* | Discrete (levels)
Latin Hypercube (LH) Sampling                         | Yes - including total effects*  | Continuous (distributions)
Quasi-Random Sampling with Low-Discrepancy Sequences  | Yes - including total effects*  | Continuous (distributions)

There are a few different approaches to the design of experiments, closely related to the chosen sensitivity analysis approach, which is in turn shaped by the research motivations, scientific questions, and computational constraints at hand (additional discussion of this can be found at the end of Chapter 3). For example, in a sensitivity analysis using perturbation and derivatives methods, the model input parameters vary from their nominal values one at a time, something that the design of experiments needs to reflect. If, instead, one were to perform sensitivity analysis using a multiple-starts perturbation method, the design of experiments needs to consider that multiple points across the factor space are used. The design of experiments specifically defines two key characteristics of samples that are fed to the numerical model: the number of samples and the range of each factor.

+

Generally, sampling can be performed randomly or by applying a stratifying approach. In random sampling, such as Monte Carlo [58], samples are randomly generated by a pseudo-random number generator with an a priori assumption about the distribution of parameters and their possible ranges. Random seeds can also be used to ensure consistency and higher control over the random process. However, this method can leave gaps in some parts of the parameter space and cause clustering in others, especially for a large number of parameters [59]. Most sampling strategies use stratified sampling to mitigate these disadvantages. Stratified sampling techniques divide the domain of each factor into subintervals, often of equal lengths. From each subinterval, an equal number of samples is drawn, either randomly or based on specific locations within the subintervals [49].
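The difference between the two approaches can be seen in one dimension with a few lines of code (a sketch, using an arbitrary seed): simple random sampling may leave gaps and clusters, whereas stratified sampling forces one draw into each equal-width subinterval.

```python
import numpy as np

rng = np.random.default_rng(123)   # fixed seed for a reproducible experiment
n = 8

# Simple random (Monte Carlo) sampling of one factor on [0, 1): gaps/clusters possible
plain = np.sort(rng.uniform(0.0, 1.0, n))

# Stratified sampling: one random draw from each of n equal-width subintervals
edges = np.linspace(0.0, 1.0, n + 1)
stratified = rng.uniform(edges[:-1], edges[1:])

print("random:    ", np.round(plain, 2))
print("stratified:", np.round(stratified, 2))
```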

+
+

One-At-a-Time (OAT)#

+

In this approach, only one model factor is changed at a time while all others are kept fixed across each iteration in a sampling sequence. The OAT method assumes that the model factors of focus are linearly independent (i.e., there are no interactions) and can analyze how factors individually influence model outputs or metrics of interest. While popular given its ease of implementation, OAT is ultimately limited in its exploration of a model’s sensitivities [49]. It is primarily used with local sensitivity techniques, which face similar criticisms: applying this sampling scheme to a system with nonlinear and interactive processes will miss important information on the effect uncertain factors have on the model. OAT sampling can be repeated multiple times in a more sophisticated manner and across different locations of the parameter space to overcome some of these challenges, but doing so increases computational costs and negates the main reasons for its selection. Given these limitations, OAT methods could be used as preliminary, low-cost analyses of the factors’ individual effects, but should ultimately be complemented with more sophisticated methods.
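For concreteness, a minimal sketch of an OAT design is shown below, assuming a nominal point for three factors and a fixed relative perturbation; all values are illustrative.

    import numpy as np

    nominal = np.array([0.5, 1.0, 2.0])      # nominal values for three factors (illustrative)
    perturbation = 0.1                        # +/- 10% relative perturbation

    oat_design = [nominal.copy()]             # start from the nominal point
    for i in range(len(nominal)):
        for sign in (-1, 1):
            x = nominal.copy()
            x[i] = nominal[i] * (1 + sign * perturbation)   # move only factor i
            oat_design.append(x)

    oat_design = np.array(oat_design)         # 1 + 2*k rows for k factors
    print(oat_design)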

+
+
+

Full and Fractional Factorial Sampling#

+

In full factorial sampling each factor is treated as being discrete by considering two or more levels (or intervals) of its values. The sampling process then generates samples within each possible combination of levels, corresponding to each parameter. This scheme produces a more comprehensive sampling of the factors’ variability space, as it accounts for all candidate combinations of factor levels (Fig. 3.3 (a)). If the number of levels is the same across all factors, the number of generated samples is estimated using \(n^k\), where \(n\) is the number of levels and \(k\) is the number of factors. For example, Fig. 3.3 (a) presents a full factorial sampling of three uncertain factors \((x_1,\) \(x_2,\) and \(x_3)\), each considered as having four discrete levels. The total number of samples necessary for such an experiment is \(4^3=64\). As the number of factors increases, the number of simulations necessary will also grow exponentially, making full factorial sampling computationally burdensome (Fig. 3.3 (b)). As a result, it is common in the literature to apply full factorial sampling at only two levels per factor, typically the two extremes [60]. This significantly reduces computational burden but is only considered appropriate in cases where factors can indeed only assume two discrete values (e.g., when testing the effects of epistemic uncertainty and comparing between model structure A and model structure B). In the case of physical parameters on continuous distributions (e.g., when considering the effects of measurement uncertainty in a temperature sensor), discretizing the range of a factor to only extreme levels can bias its estimated importance.
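A full factorial design like the one in Fig. 3.3 (a) can be enumerated directly; the sketch below uses itertools to generate all 4^3 = 64 level combinations for three factors, assuming each factor has been discretized to four levels on an illustrative [0, 1] range.

    import itertools
    import numpy as np

    levels = np.linspace(0.0, 1.0, 4)    # four levels per factor (assumed range [0, 1])
    full_factorial = np.array(list(itertools.product(levels, repeat=3)))
    print(full_factorial.shape)          # (64, 3)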

+

Fractional factorial sampling is a widely used alternative to full factorial sampling that allows the analyst to significantly reduce the number of simulations by focusing on the main effects of a factor and seeking to avoid model runs that yield redundant response information [49]. In other words, if one can reasonably assume that higher-order interactions are negligible, information about the most significant effects and lower-order interactions (e.g., effects from pairs of factors) can be obtained using a fraction of the full factorial design. Traditionally, fractional factorial design has also been limited to two levels [60], referred to as \(2^{k-p}\) Fractional Factorial designs [61]. Recently, Generalized Fractional Factorial designs have also been proposed that allow for the structured generation of samples at more than two levels per factor [62]. Consider a case where the modeling team dealing with the problem in Fig. 3.3 (a) cannot afford to perform 64 simulations of their model. They can afford 32 runs for their experiment and instead decide to fractionally sample the variability space of their factors. A potential design of such a sampling strategy is presented in Fig. 3.3 (c).

+
+Figure 3.3 + +
+

Alternative designs of experiments and their computational costs for three uncertain factors \((x_1,\) \(x_2,\) and \(x_3)\). (a) Full factorial design sampling of three factors at four levels, at a total of 64 samples; (b) exponential growth of necessary number of samples when applying full factorial design at four levels; (c) fractional factorial design of three factors at four levels, at a total of 32 samples; and (d) Latin Hypercube sample of three factors with uniform distributions, at a total of 32 samples.#

+
+
+
+
+

Latin Hypercube Sampling (LHS)#

+

Latin hypercube sampling (LHS) [63] is one of the most common methods in space-filling experimental designs. With this sampling technique, for \(N\) uncertain factors, an \(N\)-dimensional hypercube is generated, with each factor divided into an equal number of levels depending on the total number of samples to be generated. Equal numbers of samples are then randomly generated at each level, across all factors. In this manner, Latin hypercube design guarantees sampling from every level of the variability space without any overlaps. When the number of samples generated is much larger than the number of uncertain factors, LHS can be very effective in examining the effects of each factor [49]. LHS is an attractive technique because it guarantees a diverse coverage of the space, through the use of subintervals, without being constrained to discrete levels for each factor - compare Fig. 3.3 (c) with Fig. 3.3 (d) for the same number of samples.
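A sketch of an LHS design like the one in Fig. 3.3 (d) is shown below, using SciPy's quasi-Monte Carlo module; the bounds, sample size, and seed are illustrative assumptions.

    from scipy.stats import qmc

    sampler = qmc.LatinHypercube(d=3, seed=7)
    unit_sample = sampler.random(n=32)           # 32 points in the unit hypercube
    # Rescale to assumed factor ranges, e.g., each factor uniform on [0, 10]
    lhs_sample = qmc.scale(unit_sample, l_bounds=[0, 0, 0], u_bounds=[10, 10, 10])
    print(lhs_sample.shape)                      # (32, 3)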

+

LHS is less effective when the number of samples is not much larger than the number of uncertain factors, and the effects of each factor cannot be appropriately distinguished. The samples between factors can also be highly correlated, biasing any subsequent sensitivity analysis results. To address this, the sampling scheme can be modified to control for the correlation in parameters while maximizing the information derived. An example of such modification is through the use of orthogonal arrays [64].

+
+
+

Low-Discrepancy Sequences#

+

Low-discrepancy sequences are another sampling technique, used in place of a pseudo-random number generator in quasi-Monte Carlo sampling [65, 66]. These quasi-Monte Carlo methods eliminate ‘lumpiness’ across samples (i.e., the presence of gaps and clusters) by minimizing discrepancy across the hypercube samples. Discrepancy can be quantitatively measured using the deviations of sampled points from a uniform distribution [65, 67]. Low-discrepancy sequences ensure that the number of samples in any subspace of the variability hypercube is approximately the same. This is not something guaranteed by Latin Hypercube sampling, and even though its design can be improved through optimization with various criteria, such adjustments are limited to small sample sizes and low dimensions [67, 68, 69, 70, 71]. In contrast, the Sobol sequence [72, 73], one of the most widely used sampling techniques, utilizes the low-discrepancy approach to uniformly fill the sampled factor space. A core advantage of this style of sampling is that it attains a much lower level of error in estimating model output statistics (e.g., the mean and variance of outputs) with far fewer samples (i.e., simulations).
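The sketch below compares the discrepancy of a scrambled Sobol sequence with that of pure random sampling using SciPy's qmc module; the sample size and seed are arbitrary, and lower discrepancy values indicate more uniform coverage of the unit hypercube.

    import numpy as np
    from scipy.stats import qmc

    n, d = 256, 3
    sobol_points = qmc.Sobol(d=d, scramble=True, seed=1).random(n)
    random_points = np.random.default_rng(1).uniform(size=(n, d))

    print(qmc.discrepancy(sobol_points))     # typically much smaller...
    print(qmc.discrepancy(random_points))    # ...than for the purely random sample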

+
+

Note

+

Put this into practice! Click the following link to try out an interactive tutorial which uses Sobol sequence sampling for the purposes of a Sobol sensitivity analysis: Sobol SA using SALib Jupyter Notebook

+
+
+
+

Other types of sampling#

+

The sampling techniques mentioned so far are general sampling methods useful for a variety of applications beyond sensitivity analysis. There are however techniques that have been developed for specific sensitivity analysis methods. Examples of these methods include the Morris One-At-a-Time [74], Fourier Amplitude Sensitivity Test (FAST; [75]), Extended FAST [76], and Extended Sobol methods [77]. For example, the Morris sampling strategy builds a number of trajectories (usually referred to as repetitions and denoted by \(r\)) in the input space each composed of \(N+1\) factor points, where \(N\) is the number of uncertain factors. The first point of the trajectory is selected randomly and the subsequent \(N\) points are generated by moving one factor at a time by a fixed amount. Each factor is perturbed once along the trajectory, while the starting points of all of the trajectories are randomly and uniformly distributed. Several variations of this strategy also exist in the literature; for more details on each approach and their differences the reader is directed to Pianosi et al. [51].
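As an illustration of the trajectory-based sampling described above, the sketch below generates a Morris design with SALib (assuming SALib is installed); the problem definition, number of trajectories, and number of levels are all illustrative assumptions.

    from SALib.sample.morris import sample as morris_sample

    problem = {
        "num_vars": 3,
        "names": ["x1", "x2", "x3"],
        "bounds": [[0.0, 1.0], [0.0, 1.0], [0.0, 1.0]],
    }

    r = 10                                            # number of trajectories (repetitions)
    X = morris_sample(problem, N=r, num_levels=4)     # each trajectory has num_vars + 1 points
    print(X.shape)                                    # (40, 3)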

+
+
+

Synthetic generation of input time series#

+

Models often have input time series or processes with strong temporal and/or spatial correlations (e.g., streamflow, energy demand, pricing of commodities, etc.) that, while they might not immediately come to mind as factors to be examined in sensitivity analysis, can be treated as such. Synthetic input time series are used for a variety of reasons, for example, when observations are not available or are limited, or when past observations are not considered sufficiently representative to capture rare or extreme events of interest [78, 79]. Synthetic generation of input time series provides a valuable tool to consider non-stationarity and incorporate potential stressors, such as climate change impacts into input time series [80]. For example, a century of record will be insufficient to capture very high impact rare extreme events (e.g., persistent multi-year droughts). A large body of statistical literature exists focusing on the topics of synthetic weather [81, 82] and streamflow [83, 84] generation that provides a rich suite of approaches for developing history-informed, well-characterized stochastic process models to better estimate rare individual or compound (hot, severe drought) extremes. It is beyond the scope of this text to review these methods, but readers are encouraged to explore the studies cited above as well as the following publications for discussions and comparisons of these methods: [78, 80, 85, 86, 87, 88, 89]. The use of these methods for the purposes of exploratory modeling, especially in the context of well-characterized versus deep uncertainty, is further discussed in Chapter 4.3.
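The sketch below is a deliberately simple example of synthetic generation: a lag-1 autoregressive (AR(1)) model fitted to a log-transformed record and used to produce a much longer synthetic series. The "observed" record here is a synthetic stand-in, and this is only one of many approaches discussed in the literature cited above.

    import numpy as np

    rng = np.random.default_rng(0)
    observed = rng.lognormal(mean=3.0, sigma=0.5, size=1200)    # stand-in for an observed record

    log_q = np.log(observed)
    mu, sigma = log_q.mean(), log_q.std()
    rho = np.corrcoef(log_q[:-1], log_q[1:])[0, 1]              # lag-1 autocorrelation

    n_steps = 10_000                                            # much longer synthetic record
    z = np.empty(n_steps)
    z[0] = rng.normal()
    for t in range(1, n_steps):
        z[t] = rho * z[t - 1] + np.sqrt(1 - rho**2) * rng.normal()

    synthetic = np.exp(mu + sigma * z)                          # back-transform to flow-like values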

+
+
+ + +
+ + + + + + +
+ +
+
+
+ +
+ + + + + + +
+
+ + +
+ + +
+
+
+ + + + + +
+
+ + \ No newline at end of file diff --git a/dev/docs/html/3.4.1_derivative_based_methods.html b/dev/docs/html/3.4.1_derivative_based_methods.html new file mode 100644 index 0000000..21bf25b --- /dev/null +++ b/dev/docs/html/3.4.1_derivative_based_methods.html @@ -0,0 +1,461 @@ + + + + + + + + + + + Derivative-based Methods — Addressing Uncertainty in MultiSector Dynamics Research documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ +
+ +
+ + + + + +
+
+ + + + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + +
+

Derivative-based Methods

+ +
+
+ +
+
+
+ + + + +
+ +
+

Derivative-based Methods#

+

Derivative-based methods explore how model outputs are affected by perturbations in a single model input around a particular input value. These methods are local and are performed using OAT sampling. For simplicity of mathematical notations, let us assume that the model \(g(X)\) only returns one output. Following [90] and [51], the sensitivity index, \(S_i\) , of the model’s i-th input factor, \(x_i\) , can be measured using the partial derivative evaluated at a nominal value, \(\bar{x}\), of the vector of inputs:

+
+\[S_i (\bar{x}) = \frac{\partial g}{\partial x_i} \Big|_{\bar{x}} c_i\]
+

where \(c_i\) is the scaling factor. In most applications, however, the relationship \(g(X)\) is not fully known in its analytical form, and therefore the above partial derivative is usually approximated:

+
+\[S_i (\bar{x}) = \frac{g(\bar{x}_1,...\bar{x}_i+\Delta_i,...\bar{x}_N)-g(\bar{x}_1,...\bar{x}_i,...\bar{x}_N)}{\Delta_i}c_i\]
+

Using this approximation, the i-th input factor is perturbed by a magnitude of \(\Delta_i\), and its relative importance is calculated. Derivative-based methods are some of the oldest sensitivity analysis methods as they only require \(N+1\) model evaluations to estimate indices for \(N\) uncertain factors. As described above, being computationally very cheap comes at the cost of not being able to explore the entire input space, but only (local) perturbations to the nominal value. Additionally, as these methods examine the effects of each input factor one at a time, they cannot assess parametric interactions or capture the interacting nature of many real systems and the models that abstract them.
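A minimal sketch of this forward-difference approximation is shown below; the model \(g\), the nominal point, and the choice of scaling factors (here the nominal values themselves) are all illustrative assumptions.

    import numpy as np

    def g(x):
        return x[0] ** 2 + 2.0 * x[1] + 0.5 * x[0] * x[2]   # stand-in model with one output

    x_nominal = np.array([1.0, 2.0, 3.0])
    delta = 1e-6
    c = x_nominal.copy()            # assumed scaling factors c_i

    g0 = g(x_nominal)               # one baseline evaluation
    S = np.empty(len(x_nominal))
    for i in range(len(x_nominal)):
        x_pert = x_nominal.copy()
        x_pert[i] += delta          # perturb only factor i
        S[i] = (g(x_pert) - g0) / delta * c[i]   # N additional evaluations in total

    print(S)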

+
+ + +
+ + + + + + +
+ +
+
+
+ +
+ + + + +
+
+ + +
+ + +
+
+
+ + + + + +
+
+ + \ No newline at end of file diff --git a/dev/docs/html/3.4.2_elementary_effect_methods.html b/dev/docs/html/3.4.2_elementary_effect_methods.html new file mode 100644 index 0000000..808b173 --- /dev/null +++ b/dev/docs/html/3.4.2_elementary_effect_methods.html @@ -0,0 +1,469 @@ + + + + + + + + + + + Elementary Effect Methods — Addressing Uncertainty in MultiSector Dynamics Research documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ +
+ +
+ + + + + +
+
+ + + + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + +
+

Elementary Effect Methods

+ +
+
+ +
+
+
+ + + + +
+ +
+

Elementary Effect Methods#

+

Elementary effect (EE) SA methods provide a solution to the local nature of the derivative-based methods by exploring the entire parametric range of each input parameter [91]. However, EE methods still use OAT sampling and do not vary all input parameters simultaneously while exploring the parametric space. The OAT nature of EE methods therefore prevents them from properly capturing the interactions between uncertain factors. EE methods are computationally efficient compared to their All-At-a-Time (AAT) counterparts, making them more suitable when computational capacity is a limiting factor, while still allowing for some inferences regarding factor interactions. The most popular EE method is the Method of Morris [74]. Following the notation of [51], this method calculates global sensitivity using the mean of the EEs (finite differences) of each parameter at different locations:

+
+\[S_i = \mu_i^* = \frac{1}{r}\sum_{j=1}^r EE^j_i = \frac{1}{r}\sum_{j=1}^r \frac{g(\bar{x}_1,...\bar{x}_i+\Delta_i,...\bar{x}_N)-g(\bar{x}_1,...\bar{x}_i,...\bar{x}_N)}{\Delta_i}c_i\]
+

with \(r\) representing the number of sample repetitions (also referred to as trajectories) in the input space, usually set between 4 and 10 [38]. Each \(x_j\) represents the points of each trajectory, with \(j=1,\dots,r\), selected as described in the sampling strategy for this method, found above. This method also produces the standard deviation of the EEs:

+
+\[\sigma_i = \sqrt{\frac{1}{r}\sum_{j=1}^r(EE_i^j-\frac{1}{r}\sum_{j=1}^r EE^j_i)^2}\]
+

which is a measure of parametric interactions. Higher values of \(\sigma_i\) suggest model responses at different levels of factor \(x_i\) are significantly different, which indicates considerable interactions between that and other uncertain factors. The values of \(\mu_i^*\) and \(\sigma_i\) for each factor allow us to draw several different conclusions, illustrated in Fig. 3.4, following the example by [91]. In this example, factors \(x_1\), \(x_2\), \(x_4\), and \(x_5\) can be said to have an influence on the model outputs, with \(x_1\), \(x_4\), and \(x_5\) having some interactive or non-linear effects. Depending on the orders of magnitude of \(\mu_i^*\) and \(\sigma_i\), one can indirectly deduce whether the factors have strong interactive effects; for example, if a factor’s \(\sigma_i \ll \mu_i^*\), then the relationship between that factor and the output can be assumed to be largely linear (note that this is still an OAT method and assumptions on factor interactions should be strongly caveated). Extensions of the Method of Morris have also been developed specifically for the purposes of factor fixing and explorations of parametric interactions (e.g., [48, 92, 93]).

+
+Figure 3.4 + +
+

Illustrative results of the Morris Method. Factors \(x_1\), \(x_2\), \(x_4\), and \(x_5\) have an influence on the model outputs, with \(x_1\), \(x_4\), and \(x_5\) having interactive or non-linear effects. Whether or not a factor should be considered influential to the output depends on the output selected and is specific to the research context and purpose of the analysis, as discussed in Chapter 3.2.#
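A compact sketch of a full Morris workflow (sampling, model evaluation, and estimation of \(\mu_i^*\) and \(\sigma_i\)) with SALib is shown below, assuming SALib is installed and using the Ishigami test function that ships with the library.

    import numpy as np
    from SALib.sample.morris import sample as morris_sample
    from SALib.analyze.morris import analyze as morris_analyze
    from SALib.test_functions import Ishigami

    problem = {
        "num_vars": 3,
        "names": ["x1", "x2", "x3"],
        "bounds": [[-np.pi, np.pi]] * 3,
    }

    X = morris_sample(problem, N=10, num_levels=4)    # 10 trajectories
    Y = Ishigami.evaluate(X)
    res = morris_analyze(problem, X, Y, num_levels=4)
    print(res["mu_star"])                             # mean absolute elementary effects
    print(res["sigma"])                               # spread of the elementary effects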

+
+
+
+ + +
+ + + + + + +
+ +
+
+
+ +
+ + + + +
+
+ + +
+ + +
+
+
+ + + + + +
+
+ + \ No newline at end of file diff --git a/dev/docs/html/3.4.3_regression_based_methods.html b/dev/docs/html/3.4.3_regression_based_methods.html new file mode 100644 index 0000000..6383a1e --- /dev/null +++ b/dev/docs/html/3.4.3_regression_based_methods.html @@ -0,0 +1,469 @@ + + + + + + + + + + + Regression-based Methods — Addressing Uncertainty in MultiSector Dynamics Research documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ +
+ +
+ + + + + +
+
+ + + + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + +
+

Regression-based Methods

+ +
+
+ +
+
+
+ + + + +
+ +
+

Regression-based Methods#

+

Regression analysis is one of the oldest ways of investigating parametric importance and sensitivity [38]. Here, we describe some of the most popular regression-based sensitivity indices. One of the main sensitivity indices of this category is the standardized regression coefficient (SRC). To calculate the SRC, a linear regression relationship needs to be fitted between the input vector, \(x\), and the model output of interest using least-squares minimization:

+
+\[y = b_0 + \sum_{i=1}^N b_ix_i\]
+

where \(b_0\) and \(b_i\) (corresponding to the i-th model input) are regression coefficients. The following relationship can then be used to calculate the SRCs for different input values:

+
+\[S_i=SRC_i=b_i\frac{\sigma_i}{\sigma_y}\]
+

where \(\sigma_i\) and \(\sigma_y\) are standard deviations of i-th model input and output, respectively.

+

Several other regression-based indices explore the correlation between input and output parameters as a proxy for model parametric sensitivity [91, 94, 95]. The Pearson correlation coefficient (PCC) can be used when a linear relationship exists between an uncertain factor, \(x_i\), and the output \(y\):

+
+\[S_i=PCC=\frac{cov(x_i,y)}{\sigma_i\sigma_y}\]
+

In cases when there are outliers in the data or the relationship between the uncertain factors and the output is not linear, rank-based correlation coefficients are preferred, for example, Spearman’s rank correlation coefficient (SRCC):

+
+\[S_i=SRCC=\frac{cov(rx_i,ry)}{\sigma_{rx_i}\sigma_{ry}}\]
+

where the raw values of \(x_i\) and \(y\) are converted to ranks \(rx_i\) and \(ry\), respectively; the index therefore measures the strength of the monotonic, rather than linear, relationship between the input and output. Other regression-based metrics include the partial correlation coefficient, the partial rank correlation coefficient, and the Nash-Sutcliffe coefficient; more discussion on these can be found in [39, 91].
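The sketch below computes SRC, PCC, and SRCC indices from a generic sample of inputs and outputs using NumPy and SciPy; the sample and the stand-in response are synthetic and purely illustrative.

    import numpy as np
    from scipy import stats

    rng = np.random.default_rng(3)
    X = rng.uniform(size=(500, 3))                                 # 500 samples of 3 factors
    y = 2.0 * X[:, 0] + 0.5 * X[:, 1] ** 2 + rng.normal(scale=0.05, size=500)

    # Standardized regression coefficients from a least-squares fit (intercept dropped)
    A = np.column_stack([np.ones(len(y)), X])
    b = np.linalg.lstsq(A, y, rcond=None)[0][1:]
    src = b * X.std(axis=0) / y.std()

    pcc = [stats.pearsonr(X[:, i], y)[0] for i in range(3)]        # linear correlation
    srcc = [stats.spearmanr(X[:, i], y)[0] for i in range(3)]      # rank (monotonic) correlation
    print(src, pcc, srcc)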

+

Tree-based regression techniques have also been used for sensitivity analysis in an effort to address the challenges faced with nonlinear models [96]. Examples of these methods include the Patient Rule Induction Method (PRIM; [97]) and Classification And Regression Trees (CART; [98]). CART-based approaches also include boosting and bagging extensions [99, 100]. These methods are particularly useful when sensitivity analysis is used for factor mapping (i.e., when trying to identify which uncertain model factors produce a certain model behavior). Chapter 4.3 elaborates on the use of these methods. Regression-based sensitivity analysis methods are global by nature and can explore the entire space of variables. However, the true level of comprehensiveness depends on the design of experiments and the number of simulations providing data to establish the regression relationships. Although they are usually computationally efficient, they do not produce significant information about parametric interactions [38, 39].

+
+ + +
+ + + + + + +
+ +
+
+
+ +
+ + + + +
+
+ + +
+ + +
+
+
+ + + + + +
+
+ + \ No newline at end of file diff --git a/dev/docs/html/3.4.4_regional_sensitivity_analysis.html b/dev/docs/html/3.4.4_regional_sensitivity_analysis.html new file mode 100644 index 0000000..ae309e0 --- /dev/null +++ b/dev/docs/html/3.4.4_regional_sensitivity_analysis.html @@ -0,0 +1,459 @@ + + + + + + + + + + + Regional Sensitivity Analysis — Addressing Uncertainty in MultiSector Dynamics Research documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ +
+ +
+ + + + + +
+
+ + + + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + +
+

Regional Sensitivity Analysis

+ +
+
+ +
+
+
+ + + + +
+ +
+

Regional Sensitivity Analysis#

+

Another method primarily applied for basic factor mapping applications is Regional Sensitivity Analysis (RSA; [101]). RSA is a global sensitivity analysis method that is typically implemented using standard sampling methods such as latin hypercube sampling. It is performed by specifying a condition on the output space (e.g., an upper threshold) and classifying outputs that meet the condition as behavioral and the ones that fail it as non-behavioral (illustrated in Fig. 3.2 (b)). Note that the specified threshold depends on the nature of the problem, model, and the research question. It can reflect model-performance metrics (such as errors) or consequential decision-relevant metrics (such as unacceptable system outcomes). The behavioral and non-behavioral outputs are then traced back to their originating sampled factors, where differences between the distributions of samples can be used to determine their significance in producing each part of the output. The Kolmogorov-Smirnov divergence is commonly used to quantify the difference between the distribution of behavioral and non-behavioral parameters [51]:

+
+\[S_i=\max_{x_i}\left|F_{x_i|y_b}(x_i)-F_{x_i|y_{nb}}(x_i)\right|\]
+

where \(Y_b\) represents the set of behavioral outputs, and \(F_{x_i|y_b}\) is the empirical cumulative distribution function of the values of \(x_i\) associated with values of \(y\) that belong in the behavioral set. The \(nb\) notation indicates the equivalent elements related to the non-behavioral set. Large differences between the two distributions indicate stronger effects by the parameters on the respective part of the output space.
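A minimal sketch of this classification and comparison step is shown below, using SciPy's two-sample Kolmogorov-Smirnov test as the distance measure; the sample, the stand-in model, and the behavioral threshold are illustrative assumptions.

    import numpy as np
    from scipy.stats import ks_2samp

    rng = np.random.default_rng(5)
    X = rng.uniform(size=(1000, 3))                              # sampled factors
    y = X[:, 0] ** 2 + 0.1 * X[:, 1] + rng.normal(scale=0.01, size=1000)   # stand-in model

    behavioral = y < 0.5                                         # condition on the output space
    for i in range(X.shape[1]):
        stat, _ = ks_2samp(X[behavioral, i], X[~behavioral, i])
        print(f"x{i + 1}: KS statistic = {stat:.3f}")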

+

Used in a factor mapping setting, RSA can be applied for scenario discovery [102, 103], the Generalized Likelihood Uncertainty Estimation method (GLUE; [18, 104, 105]) and other hybrid sensitivity analysis methods (e.g., [106, 107]). The fundamental shortcomings of RSA are that, in some cases, it could be hard to interpret the difference between behavioral and non-behavioral sample sets, and that insights about parametric correlations and interactions cannot always be uncovered [38]. For more elaborate discussions and illustrations of the RSA method, readers are directed to Tang et al. [42], Saltelli et al. [49], Young [108] and references therein.

+
+ + +
+ + + + + + +
+ +
+
+
+ +
+ + + + +
+
+ + +
+ + +
+
+
+ + + + + +
+
+ + \ No newline at end of file diff --git a/dev/docs/html/3.4.5_variance_based_methods.html b/dev/docs/html/3.4.5_variance_based_methods.html new file mode 100644 index 0000000..adc59a7 --- /dev/null +++ b/dev/docs/html/3.4.5_variance_based_methods.html @@ -0,0 +1,472 @@ + + + + + + + + + + + Variance-based Methods — Addressing Uncertainty in MultiSector Dynamics Research documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ +
+ +
+ + + + + +
+
+ + + + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + +
+

Variance-based Methods

+ +
+
+ +
+
+
+ + + + +
+ +
+

Variance-based Methods#

+

Variance-based sensitivity analysis methods hypothesize that various specified model factors contribute differently to the variation of model outputs; therefore, decomposition and analysis of output variance can determine a model’s sensitivity to input parameters [38, 77]. The most popular variance-based method is the Sobol method, which is a global sensitivity analysis method that takes into account complex and nonlinear factor interaction when calculating sensitivity indices, and employs more sophisticated sampling methods (e.g., the Sobol sampling method). The Sobol method is able to calculate three types of sensitivity indices that provide different types of information about model sensitivities. These indices include first-order, higher-order (e.g., second-, third-, etc. orders), and total-order sensitivities.

+

The first-order sensitivity index indicates the percent of model output variance contributed by a factor individually (i.e., the effect of varying \(x_i\) alone) and is obtained using the following [77, 109]:

+
+\[S_i^1=\frac{V_{x_i}[E_{x_{\sim i}}(y|x_i)]}{V(y)}\]
+

with \(E\) and \(V\) denoting the expected value and the variance, respectively. \(x_{\sim i}\) denotes all factors except for \(x_i\). The first-order sensitivity index (\(S_i^1\)) can therefore also be thought of as the portion of total output variance (\(V_y\)) that can be reduced if the uncertainty in factor \(x_i\) is eliminated [110]. First-order sensitivity indices are usually used to understand the independent effect of a factor and to distinguish its individual versus interactive influence. Linearly independent factors would be expected to have only first-order indices (i.e., no interactions), which should correspond well with sensitivities obtained from simpler methods using OAT sampling.

+

Higher-order sensitivity indices explore the interaction between two or more parameters that contribute to model output variations. For example, a second-order index indicates how interactions between a pair of factors can lead to change in model output variance and is calculated using the following relationship:

+
+\[S_{ij}^2=\frac{V_{x_{i,j}}[E_{x_{\sim i,j}}(y|x_i,x_j)]}{V(y)}\]
+

with \(i \ne j\). Higher-order indices can be calculated by similar extensions (i.e., fixing additional factors together), but this is usually computationally expensive in practice.

+

The total sensitivity analysis index represents the entire influence of an input factor on model outputs including all of its interactions with other factors [111]. In other words, total-order indices include first-order and all higher-order interactions associated with each factor and can be calculated using the following:

+
+\[S_i^T= \frac{E_{x_{\sim i}}[V_{x_i}(y|x_{\sim i})]}{V(y)} = 1 - \frac{V_{x_{\sim i}}[E_{x_{i}}(y|x_{\sim i})]}{V(y)}\]
+

This index reveals the expected portion of variance that remains if uncertainty is eliminated in all factors but \(x_i\) [110]. The total sensitivity index is the overall best measure of sensitivity as it captures the full individual and interactive effects of model factors.

+

Besides the Sobol method, there are some other variance-based sensitivity analysis methods, such as the Fourier amplitude sensitivity test (FAST; [75, 112]) and extended-FAST [113, 114], that have been used by the scientific community. However, Sobol remains by far the most common method of this class. Variance-based techniques have been widely used and have proved to be powerful in a variety of applications. Despite their popularity, some authors have expressed concerns about the methods’ appropriateness in some settings. Specifically, the presence of heavy-tailed distributions or outliers, or multimodal model outputs, can bias the sensitivity indices produced by these methods [115, 116, 117]. Moment-independent measures, discussed below, attempt to overcome these challenges.
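A compact sketch of a Sobol analysis workflow with SALib is shown below (assuming SALib is installed), again using the bundled Ishigami test function; see also the tutorial linked in the note that follows.

    import numpy as np
    from SALib.sample import saltelli
    from SALib.analyze import sobol
    from SALib.test_functions import Ishigami

    problem = {
        "num_vars": 3,
        "names": ["x1", "x2", "x3"],
        "bounds": [[-np.pi, np.pi]] * 3,
    }

    X = saltelli.sample(problem, 1024)     # n*(2d+2) rows when second-order indices are requested
    Y = Ishigami.evaluate(X)
    Si = sobol.analyze(problem, Y)
    print(Si["S1"])                        # first-order indices
    print(Si["S2"])                        # second-order (pairwise) indices
    print(Si["ST"])                        # total-order indices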

+
+

Note

+

Put this into practice! Click the following link to try out an interactive tutorial which demonstrates the application of a Sobol sensitivity analysis: Sobol SA using SALib Jupyter Notebook

+
+
+ + +
+ + + + + + +
+ +
+
+
+ +
+ + + + +
+
+ + +
+ + +
+
+
+ + + + + +
+
+ + \ No newline at end of file diff --git a/dev/docs/html/3.4.6_analysis_of_variance_anova.html b/dev/docs/html/3.4.6_analysis_of_variance_anova.html new file mode 100644 index 0000000..c446764 --- /dev/null +++ b/dev/docs/html/3.4.6_analysis_of_variance_anova.html @@ -0,0 +1,453 @@ + + + + + + + + + + + Analysis of Variance (ANOVA) — Addressing Uncertainty in MultiSector Dynamics Research documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ +
+ +
+ + + + + +
+
+ + + + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + +
+

Analysis of Variance (ANOVA)

+ +
+
+ +
+
+
+ + + + +
+ +
+

Analysis of Variance (ANOVA)#

+

Analysis of Variance (ANOVA) was first introduced by Fisher and others [118] and has since become a popular factor analysis method in physical experiments. ANOVA can be used as a sensitivity analysis method in computational experiments with a factorial design of experiment (referred to as factorial ANOVA). Note that Sobol can also be categorized as an ANOVA sensitivity analysis method, and that is why Sobol is sometimes referred to as a functional ANOVA [119]. Factorial ANOVA methods are particularly suited for models and problems that have discrete input spaces, significantly reducing the computational time. More information about these methods can be found in [119, 120, 121].

+
+ + +
+ + + + + + +
+ +
+
+
+ +
+ + + + +
+
+ + +
+ + +
+
+
+ + + + + +
+
+ + \ No newline at end of file diff --git a/dev/docs/html/3.4.7_moment_independent_density_based_methods.html b/dev/docs/html/3.4.7_moment_independent_density_based_methods.html new file mode 100644 index 0000000..d6eefd1 --- /dev/null +++ b/dev/docs/html/3.4.7_moment_independent_density_based_methods.html @@ -0,0 +1,458 @@ + + + + + + + + + + + Moment-Independent (Density-Based) Methods — Addressing Uncertainty in MultiSector Dynamics Research documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ +
+ +
+ + + + + +
+
+ + + + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + +
+

Moment-Independent (Density-Based) Methods

+ +
+
+ +
+
+
+ + + + +
+ +
+

Moment-Independent (Density-Based) Methods#

+

These methods typically compare the entire distribution (i.e., not just the variance) of input and output parameters in order to determine the sensitivity of the output to a particular input variable. Several moment-independent sensitivity analysis methods have been proposed in recent years. The delta (\(\delta\)) moment-independent method calculates the difference between the unconditional and conditional probability density functions of the output. The method was first introduced by [122, 123] and has become widely used in various disciplines. The \(\delta\) sensitivity index is defined as follows:

+
+\[S_i=\delta_i=\frac{1}{2}E_{x_i}\left[\int\left|f_y(y)-f_{y|x_i}(y)\right|dy\right]\]
+

where \(f_y(y)\) is the probability density function of the entire model output \(y\), and \(f_{y|x_i}(y)\) is the conditional density of \(y\), given that factor \(x_i\) assumes a fixed value. The \(\delta_i\) sensitivity indicator therefore represents the normalized expected shift in the distribution of \(y\) provoked by \(x_i\). Moment-independent methods are advantageous in cases where we are concerned about the entire distribution of events, such as when uncertain factors lead to more extreme events in a system [13]. Further, they can be used with a pre-existing sample of data, without requiring a specific sampling scheme, unlike the previously reviewed methods [124]. The \(\delta\) sensitivity index does not include interactions between factors and it is therefore akin to the first order index produced by the Sobol method. Interactions between factors can still be estimated using this method, by conditioning the calculation on more than one uncertain factor being fixed [123].
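The sketch below estimates \(\delta\) indices with SALib's delta module (assuming SALib is installed); note that it operates on a given sample of inputs and outputs, so it can reuse data from a pre-existing experiment. The sample and test function are illustrative.

    import numpy as np
    from SALib.analyze import delta
    from SALib.test_functions import Ishigami

    problem = {
        "num_vars": 3,
        "names": ["x1", "x2", "x3"],
        "bounds": [[-np.pi, np.pi]] * 3,
    }

    rng = np.random.default_rng(11)
    X = rng.uniform(-np.pi, np.pi, size=(2000, 3))    # any pre-existing sample will do
    Y = Ishigami.evaluate(X)
    res = delta.analyze(problem, X, Y)
    print(res["delta"])                               # delta moment-independent indices
    print(res["S1"])                                  # first-order Sobol indices, also estimated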

+
+ + +
+ + + + + + +
+ +
+
+
+ +
+ + + + +
+
+ + +
+ + +
+
+
+ + + + + +
+
+ + \ No newline at end of file diff --git a/dev/docs/html/3.4_sensitivity_analysis_methods.html b/dev/docs/html/3.4_sensitivity_analysis_methods.html new file mode 100644 index 0000000..c8b706e --- /dev/null +++ b/dev/docs/html/3.4_sensitivity_analysis_methods.html @@ -0,0 +1,578 @@ + + + + + + + + + + + Sensitivity Analysis Methods — Addressing Uncertainty in MultiSector Dynamics Research documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ +
+ +
+ + + + + +
+
+ + + + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + + + + + + +
+ +
+

Sensitivity Analysis Methods#

+

In this section, we describe some of the most widely applied sensitivity analysis methods along with their mathematical definitions. We also provide a detailed discussion on applying each method, as well as a comparison of their features and limitations.

+
+

Derivative-based Methods#

+

Derivative-based methods explore how model outputs are affected by perturbations in a single model input around a particular input value. These methods are local and are performed using OAT sampling. For simplicity of mathematical notations, let us assume that the model \(g(X)\) only returns one output. Following [90] and [51], the sensitivity index, \(S_i\) , of the model’s i-th input factor, \(x_i\) , can be measured using the partial derivative evaluated at a nominal value, \(\bar{x}\), of the vector of inputs:

+
+\[S_i (\bar{x}) = \frac{\partial g}{\partial x_i} \Big|_{\bar{x}} c_i\]
+

where \(c_i\) is the scaling factor. In most applications, however, the relationship \(g(X)\) is not fully known in its analytical form, and therefore the above partial derivative is usually approximated:

+
+\[S_i (\bar{x}) = \frac{g(\bar{x}_1,...\bar{x}_i+\Delta_i,...\bar{x}_N)-g(\bar{x}_1,...\bar{x}_i,...\bar{x}_N)}{\Delta_i}c_i\]
+

Using this approximation, the i-th input factor is perturbed by a magnitude of \(\Delta_i\), and its relative importance is calculated. Derivative-based methods are some of the oldest sensitivity analysis methods as they only require \(N+1\) model evaluations to estimate indices for \(N\) uncertain factors. As described above, being computationally very cheap comes at the cost of not being able to explore the entire input space, but only (local) perturbations to the nominal value. Additionally, as these methods examine the effects of each input factor one at a time, they cannot assess parametric interactions or capture the interacting nature of many real systems and the models that abstract them.

+
+
+

Elementary Effect Methods#

+

Elementary effect (EE) SA methods provide a solution to the local nature of the derivative-based methods by exploring the entire parametric range of each input parameter [91]. However, EE methods still use OAT sampling and do not vary all input parameters simultaneously while exploring the parametric space. The OAT nature of EE methods therefore prevents them from properly capturing the interactions between uncertain factors. EE methods are computationally efficient compared to their All-At-a-Time (AAT) counterparts, making them more suitable when computational capacity is a limiting factor, while still allowing for some inferences regarding factor interactions. The most popular EE method is the Method of Morris [74]. Following the notation of [51], this method calculates global sensitivity using the mean of the EEs (finite differences) of each parameter at different locations:

+
+\[S_i = \mu_i^* = \frac{1}{r}\sum_{j=1}^r EE^j_i = \frac{1}{r}\sum_{j=1}^r \frac{g(\bar{x}_1,...\bar{x}_i+\Delta_i,...\bar{x}_N)-g(\bar{x}_1,...\bar{x}_i,...\bar{x}_N)}{\Delta_i}c_i\]
+

with \(r\) representing the number of sample repetitions (also referred to as trajectories) in the input space, usually set between 4 and 10 [38]. Each \(x_j\) represents the points of each trajectory, with \(j=1,\dots,r\), selected as described in the sampling strategy for this method, found above. This method also produces the standard deviation of the EEs:

+
+\[\sigma_i = \sqrt{\frac{1}{r}\sum_{j=1}^r(EE_i^j-\frac{1}{r}\sum_{j=1}^r EE^j_i)^2}\]
+

which is a measure of parametric interactions. Higher values of \(\sigma_i\) suggest model responses at different levels of factor \(x_i\) are significantly different, which indicates considerable interactions between that and other uncertain factors. The values of \(\mu_i^*\) and \(\sigma_i\) for each factor allow us to draw several different conclusions, illustrated in Fig. 3.4, following the example by [91]. In this example, factors \(x_1\), \(x_2\), \(x_4\), and \(x_5\) can be said to have an influence on the model outputs, with \(x_1\), \(x_4\), and \(x_5\) having some interactive or non-linear effects. Depending on the orders of magnitude of \(\mu_i^*\) and \(\sigma_i\), one can indirectly deduce whether the factors have strong interactive effects; for example, if a factor’s \(\sigma_i \ll \mu_i^*\), then the relationship between that factor and the output can be assumed to be largely linear (note that this is still an OAT method and assumptions on factor interactions should be strongly caveated). Extensions of the Method of Morris have also been developed specifically for the purposes of factor fixing and explorations of parametric interactions (e.g., [48, 92, 93]).

+
+Figure 3.4 + +
+

Illustrative results of the Morris Method. Factors \(x_1\), \(x_2\), \(x_4\), and \(x_5\) have an influence on the model outputs, with \(x_1\), \(x_4\), and \(x_5\) having interactive or non-linear effects. Whether or not a factor should be considered influential to the output depends on the output selected and is specific to the research context and purpose of the analysis, as discussed in Chapter 3.2.#

+
+
+
+
+

Regression-based Methods#

+

Regression analysis is one of the oldest ways of investigating parametric importance and sensitivity [38]. Here, we describe some of the most popular regression-based sensitivity indices. One of the main sensitivity indices of this category is the standardized regression coefficient (SRC). To calculate the SRC, a linear regression relationship needs to be fitted between the input vector, \(x\), and the model output of interest using least-squares minimization:

+
+\[y = b_0 + \sum_{i=1}^N b_ix_i\]
+

where \(b_0\) and \(b_i\) (corresponding to the i-th model input) are regression coefficients. The following relationship can then be used to calculate the SRCs for different input values:

+
+\[S_i=SRC_i=b_i\frac{\sigma_i}{\sigma_y}\]
+

where \(\sigma_i\) and \(\sigma_y\) are standard deviations of i-th model input and output, respectively.

+

Several other regression-based indices explore the correlation between input and output parameters as a proxy for model parametric sensitivity [91, 94, 95]. The Pearson correlation coefficient (PCC) can be used when a linear relationship exists between an uncertain factor, \(x_i\), and the output \(y\):

+
+\[S_i=PCC=\frac{cov(x_i,y)}{\sigma_i\sigma_y}\]
+

In cases when there are outliers in the data or the relationship between the uncertain factors and the output is not linear, rank-based correlation coefficients are preferred, for example, Spearman’s rank correlation coefficient (SRCC):

+
+\[S_i=SRCC=\frac{cov(rx_i,ry)}{\sigma_{rx_i}\sigma_{ry}}\]
+

where the raw values of \(x_i\) and \(y\) are converted to ranks \(rx_i\) and \(ry\), respectively; the index therefore measures the strength of the monotonic, rather than linear, relationship between the input and output. Other regression-based metrics include the partial correlation coefficient, the partial rank correlation coefficient, and the Nash-Sutcliffe coefficient; more discussion on these can be found in [39, 91].

+

Tree-based regression techniques have also been used for sensitivity analysis in an effort to address the challenges faced with nonlinear models [96]. Examples of these methods include the Patient Rule Induction Method (PRIM; [97]) and Classification And Regression Trees (CART; [98]). CART-based approaches also include boosting and bagging extensions [99, 100]. These methods are particularly useful when sensitivity analysis is used for factor mapping (i.e., when trying to identify which uncertain model factors produce a certain model behavior). Chapter 4.3 elaborates on the use of these methods. Regression-based sensitivity analysis methods are global by nature and can explore the entire space of variables. However, the true level of comprehensiveness depends on the design of experiments and the number of simulations providing data to establish the regression relationships. Although they are usually computationally efficient, they do not produce significant information about parametric interactions [38, 39].

+
+
+

Regional Sensitivity Analysis#

+

Another method primarily applied for basic factor mapping applications is Regional Sensitivity Analysis (RSA; [101]). RSA is a global sensitivity analysis method that is typically implemented using standard sampling methods such as latin hypercube sampling. It is performed by specifying a condition on the output space (e.g., an upper threshold) and classifying outputs that meet the condition as behavioral and the ones that fail it as non-behavioral (illustrated in Fig. 3.2 (b)). Note that the specified threshold depends on the nature of the problem, model, and the research question. It can reflect model-performance metrics (such as errors) or consequential decision-relevant metrics (such as unacceptable system outcomes). The behavioral and non-behavioral outputs are then traced back to their originating sampled factors, where differences between the distributions of samples can be used to determine their significance in producing each part of the output. The Kolmogorov-Smirnov divergence is commonly used to quantify the difference between the distribution of behavioral and non-behavioral parameters [51]:

+
+\[S_i=\max_{x_i}\left|F_{x_i|y_b}(x_i)-F_{x_i|y_{nb}}(x_i)\right|\]
+

where \(Y_b\) represents the set of behavioral outputs, and \(F_{x_i|y_b}\) is the empirical cumulative distribution function of the values of \(x_i\) associated with values of \(y\) that belong in the behavioral set. The \(nb\) notation indicates the equivalent elements related to the non-behavioral set. Large differences between the two distributions indicate stronger effects by the parameters on the respective part of the output space.

+

Used in a factor mapping setting, RSA can be applied for scenario discovery [102, 103], the Generalized Likelihood Uncertainty Estimation method (GLUE; [18, 104, 105]) and other hybrid sensitivity analysis methods (e.g., [106, 107]). The fundamental shortcomings of RSA are that, in some cases, it could be hard to interpret the difference between behavioral and non-behavioral sample sets, and that insights about parametric correlations and interactions cannot always be uncovered [38]. For more elaborate discussions and illustrations of the RSA method, readers are directed to Tang et al. [42], Saltelli et al. [49], Young [108] and references therein.

+
+
+

Variance-based Methods#

+

Variance-based sensitivity analysis methods hypothesize that various specified model factors contribute differently to the variation of model outputs; therefore, decomposition and analysis of output variance can determine a model’s sensitivity to input parameters [38, 77]. The most popular variance-based method is the Sobol method, which is a global sensitivity analysis method that takes into account complex and nonlinear factor interaction when calculating sensitivity indices, and employs more sophisticated sampling methods (e.g., the Sobol sampling method). The Sobol method is able to calculate three types of sensitivity indices that provide different types of information about model sensitivities. These indices include first-order, higher-order (e.g., second-, third-, etc. orders), and total-order sensitivities.

+

The first-order sensitivity index indicates the percent of model output variance contributed by a factor individually (i.e., the effect of varying \(x_i\) alone) and is obtained using the following [77, 109]:

+
+\[S_i^1=\frac{V_{x_i}[E_{x_{\sim i}}(y|x_i)]}{V(y)}\]
+

with \(E\) and \(V\) denoting the expected value and the variance, respectively. \(x_{\sim i}\) denotes all factors except for \(x_i\). The first-order sensitivity index (\(S_i^1\)) can therefore also be thought of as the portion of total output variance (\(V_y\)) that can be reduced if the uncertainty in factor \(x_i\) is eliminated [110]. First-order sensitivity indices are usually used to understand the independent effect of a factor and to distinguish its individual versus interactive influence. Linearly independent factors would be expected to have only first-order indices (i.e., no interactions), which should correspond well with sensitivities obtained from simpler methods using OAT sampling.

+

Higher-order sensitivity indices explore the interaction between two or more parameters that contribute to model output variations. For example, a second-order index indicates how interactions between a pair of factors can lead to change in model output variance and is calculated using the following relationship:

+
+\[S_{ij}^2=\frac{V_{x_{i,j}}[E_{x_{\sim i,j}}(y|x_i,x_j)]}{V(y)}\]
+

with \(i \ne j\). Higher-order indices can be calculated by similar extensions (i.e., fixing additional factors together), but this is usually computationally expensive in practice.

+

The total sensitivity analysis index represents the entire influence of an input factor on model outputs including all of its interactions with other factors [111]. In other words, total-order indices include first-order and all higher-order interactions associated with each factor and can be calculated using the following:

+
+\[S_i^T= \frac{E_{x_{\sim i}}[V_{x_i}(y|x_{\sim i})]}{V(y)} = 1 - \frac{V_{x_{\sim i}}[E_{x_{i}}(y|x_{\sim i})]}{V(y)}\]
+

This index reveals the expected portion of variance that remains if uncertainty is eliminated in all factors but \(x_i\) [110]. The total sensitivity index is the overall best measure of sensitivity as it captures the full individual and interactive effects of model factors.

+

Besides the Sobol method, there are some other variance-based sensitivity analysis methods, such as the Fourier amplitude sensitivity test (FAST; [75, 112]) and extended-FAST [113, 114], that have been used by the scientific community. However, Sobol remains by far the most common method of this class. Variance-based techniques have been widely used and have proved to be powerful in a variety of applications. Despite their popularity, some authors have expressed concerns about the methods’ appropriateness in some settings. Specifically, the presence of heavy-tailed distributions or outliers, or multimodal model outputs, can bias the sensitivity indices produced by these methods [115, 116, 117]. Moment-independent measures, discussed below, attempt to overcome these challenges.

+
+

Note

+

Put this into practice! Click the following link to try out an interactive tutorial which demonstrates the application of a Sobol sensitivity analysis: Sobol SA using SALib Jupyter Notebook

+
+
+
+

Analysis of Variance (ANOVA)#

+

Analysis of Variance (ANOVA) was first introduced by Fisher and others [118] and has since become a popular factor analysis method in physical experiments. ANOVA can be used as a sensitivity analysis method in computational experiments with a factorial design of experiment (referred to as factorial ANOVA). Note that Sobol can also be categorized as an ANOVA sensitivity analysis method, and that is why Sobol is sometimes referred to as a functional ANOVA [119]. Factorial ANOVA methods are particularly suited for models and problems that have discrete input spaces, significantly reducing the computational time. More information about these methods can be found in [119, 120, 121].

+
+
+

Moment-Independent (Density-Based) Methods#

+

These methods typically compare the entire distribution (i.e., not just the variance) of input and output parameters in order to determine the sensitivity of the output to a particular input variable. Several moment-independent sensitivity analysis methods have been proposed in recent years. The delta (\(\delta\)) moment-independent method calculates the difference between the unconditional and conditional probability density functions of the output. The method was first introduced by [122, 123] and has become widely used in various disciplines. The \(\delta\) sensitivity index is defined as follows:

+
+\[S_i=\delta_i=\frac{1}{2}E_{x_i}\left[\int\left|f_y(y)-f_{y|x_i}(y)\right|dy\right]\]
+

where \(f_y(y)\) is the probability density function of the entire model output \(y\), and \(f_{y|x_i}(y)\) is the conditional density of \(y\), given that factor \(x_i\) assumes a fixed value. The \(\delta_i\) sensitivity indicator therefore represents the normalized expected shift in the distribution of \(y\) provoked by \(x_i\). Moment-independent methods are advantageous in cases where we are concerned about the entire distribution of events, such as when uncertain factors lead to more extreme events in a system [13]. Further, they can be used with a pre-existing sample of data, without requiring a specific sampling scheme, unlike the previously reviewed methods [124]. The \(\delta\) sensitivity index does not include interactions between factors and it is therefore akin to the first order index produced by the Sobol method. Interactions between factors can still be estimated using this method, by conditioning the calculation on more than one uncertain factor being fixed [123].

+
+
+ + +
+ + + + + + +
+ +
+
+
+ +
+ + + + + + +
+
+ + +
+ + +
+
+
+ + + + + +
+
+ + \ No newline at end of file diff --git a/dev/docs/html/3.5_how_to_choose_a_sensitivity_analysis_method_model_traits_and_dimensionality.html b/dev/docs/html/3.5_how_to_choose_a_sensitivity_analysis_method_model_traits_and_dimensionality.html new file mode 100644 index 0000000..fc090f6 --- /dev/null +++ b/dev/docs/html/3.5_how_to_choose_a_sensitivity_analysis_method_model_traits_and_dimensionality.html @@ -0,0 +1,464 @@ + + + + + + + + + + + How To Choose A Sensitivity Analysis Method: Model Traits And Dimensionality — Addressing Uncertainty in MultiSector Dynamics Research documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ +
+ +
+ + + + + +
+
+ + + + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + +
+

How To Choose A Sensitivity Analysis Method: Model Traits And Dimensionality

+ +
+
+ +
+
+
+ + + + +
+ +
+

How To Choose A Sensitivity Analysis Method: Model Traits And Dimensionality#

+

Fig. 3.5, synthesized from variants found in [51, 91], presents a graphical synthesis of the methods overviewed in this section, with regards to their appropriateness of application based on the complexity of the model at hand and the computational limits on the number of model evaluations afforded. The bars below each method also indicate the sensitivity analysis purposes they are most appropriate to address, which are in turn a reflection of the motivations and research questions the sensitivity analysis is called to address. Computational intensity is measured as a multiple of the number of model factors that are considered uncertain (\(d\)). Increasing model complexity mandates that more advanced sensitivity analysis methods are applied to address potential nonlinearities, factor interactions, and discontinuities. Such methods can only be performed at increasing computational expense. For example, computationally cheap linear regression should not be used to assess factors’ importance if the model cannot be proven linear and the factors independent, because important relationships will invariably be missed (recall the example in Fig. 3.5). When computational limits constrain an application to simplified assumptions and sensitivity techniques, any conclusions should be delivered with clear statements of the appropriate caveats.

+
+Figure 3_5 + +
+

Classification of the sensitivity analysis methods overviewed in this section, with regards to their computational cost (horizontal axis), their appropriateness to model complexity (vertical axis), and the purpose they can be used for (colored bars). d: number of uncertain factors considered; ANOVA: Analysis of Variance; FAST: Fourier Amplitude Sensitivity Test; PRIM: Patient Rule Induction Method; CART: Classification and Regression Trees; SRCC: Spearman’s rank correlation coefficient; NSE: Nash–Sutcliffe efficiency; SRC: standardized regression coefficient; PCC: Pearson correlation coefficient. This figure is synthesized from variants found in [51, 91].#

+
+
+

The reader should also be aware that the estimates of computational intensity that are given here are indicative of magnitude and would vary depending on the sampling technique, model complexity and the level of information sought. For example, a Sobol sensitivity analysis typically requires a sample of size \(n \times (d+2)\) to produce first- and total-order indices, where \(d\) is the number of uncertain factors and \(n\) is a scaling factor, selected ad hoc, depending on model complexity [46]. The scaling factor \(n\) is typically set to at least 1000, but it should most appropriately be set on the basis of index convergence. In other words, a prudent analyst would perform the analysis several times with increasing \(n\) and observe at what level the indices converge to stable values [125]. That level should then be the minimum sample size used in subsequent sensitivity analyses of the same system. Furthermore, if the analyst would like to better understand the degrees of interaction between factors, requiring second-order indices, the sample size would have to increase to \(n \times (2d+2)\) [46].
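The sketch below illustrates both points with SALib (assuming SALib is installed): the Saltelli sample grows as \(n \times (d+2)\) when second-order indices are not requested, and repeating the analysis with increasing \(n\) provides a simple check of index convergence. The test function and sample sizes are illustrative.

    import numpy as np
    from SALib.sample import saltelli
    from SALib.analyze import sobol
    from SALib.test_functions import Ishigami

    problem = {"num_vars": 3, "names": ["x1", "x2", "x3"], "bounds": [[-np.pi, np.pi]] * 3}

    for n in (256, 512, 1024, 2048):
        X = saltelli.sample(problem, n, calc_second_order=False)    # n * (d + 2) model runs
        Y = Ishigami.evaluate(X)
        Si = sobol.analyze(problem, Y, calc_second_order=False)
        print(n, X.shape[0], np.round(Si["ST"], 3))                 # watch the indices stabilize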

+

Another important consideration is that methods that do not require specific sampling schemes can be performed in conjunction with others without requiring additional model evaluations. None of the regression-based methods, for example, require samples of specific structures or sizes, and can be combined with other methods for complementary purposes. For instance, one could complement a Sobol analysis with an application of CART, using the same data, but to address questions relating to factor mapping (e.g., we know factor \(x_i\) is important for a model output, but we would like to also know which of its values specifically push the output to undesirable states). Lastly, comparing results from different methods performed together can be especially useful in model diagnostic settings. For example, [13] used \(\delta\) indices, first-order Sobol indices, and \(R^2\) values from linear regression, all performed on the same factors, to derive insights about the effects of factors on different moments of the output distribution and about the linearity of their relationship.

+
+ + +
+ + + + + + +
+ +
+
+
+ +
+ + + + +
+
+ + +
+ + +
+
+
+ + + + + +
+
+ + \ No newline at end of file diff --git a/dev/docs/html/3.6_software_toolkits.html b/dev/docs/html/3.6_software_toolkits.html new file mode 100644 index 0000000..8c83d3e --- /dev/null +++ b/dev/docs/html/3.6_software_toolkits.html @@ -0,0 +1,460 @@ + + + + + + + + + + + Software Toolkits — Addressing Uncertainty in MultiSector Dynamics Research documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +


Software Toolkits#

+

This section presents available open source sensitivity analysis software tools, organized by the programming language they use and the methods they support (Fig. 3.6). Our review covers five widely used programming languages: R, MATLAB, Julia, Python, and C++, as well as one tool that provides a graphical user interface (GUI). Each available SA tool was assessed on the number of SA methods and design of experiments methods it supports. For example, the sensobol package in R only supports the variance-based Sobol method; however, it is the only package we came across that calculates third-order interactions among parameters. At the other end of the spectrum, there are SA software packages that contain several popular SA methods. For example, SALib in Python [126] supports seven different SA methods. The DifferentialEquations package is a comprehensive package developed for Julia, and GlobalSensitivityAnalysis is another Julia package that has mostly adapted SALib methods. Fig. 3.6 also identifies the SA packages that have been updated since 2018, indicating active support and development.

+
Figure 3.6
+

Sensitivity analysis packages available in different programming language platforms (R, Python, Julia, MATLAB, and C++), with the number of methods they support. Packages supporting more than five methods are indicated in pink. Packages updated since 2018 are indicated with asterisks.#

+ + \ No newline at end of file diff --git a/dev/docs/html/3_sensitivity_analysis_the_basics.html b/dev/docs/html/3_sensitivity_analysis_the_basics.html new file mode 100644 index 0000000..c39a15e --- /dev/null +++ b/dev/docs/html/3_sensitivity_analysis_the_basics.html @@ -0,0 +1,777 @@ + + + + + + + + + + + 3. Sensitivity Analysis: The Basics — Addressing Uncertainty in MultiSector Dynamics Research documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

3. Sensitivity Analysis: The Basics#

+
+

3.1. Global Versus Local Sensitivity#

+

Out of the several definitions for sensitivity analysis presented in the literature, the most widely used has been proposed by Saltelli et al. [38] as “the study of how uncertainty in the output of a model (numerical or otherwise) can be apportioned to different sources of uncertainty in the model input”. In other words, sensitivity analysis explores the relationship between the model’s \(N\) input variables, \(x=[x_1,x_2,...,x_N]\), and \(M\) output variables, \(y=[y_1,y_2,...,y_M]\) with \(y=g(x)\), where \(g\) is the model that maps the model inputs to the outputs [39].

+

Historically, there have been two broad categories of sensitivity analysis techniques: local and global. Local sensitivity analysis is performed by varying model parameters around specific reference values, with the goal of exploring how small input perturbations influence model performance. Due to its ease of use and limited computational demands, this approach has been widely used in the literature, but it has important limitations [40, 41]. If the model is not linear, the results of local sensitivity analysis can be heavily biased, as they are strongly influenced by independence assumptions and a limited exploration of model inputs (e.g., Tang et al. [42]). If the model’s factors interact, local sensitivity analysis will underestimate their importance, as it does not account for those effects (e.g., [43]). In general, as local sensitivity analysis only partially and locally explores a model’s parametric space, it is not considered a valid approach for nonlinear models [44]. This is illustrated in Fig. 3.1 (a-b), presenting contour plots of a model response (\(y\)) for an additive linear model (a) and a nonlinear model (b). In a linear model without interactions between the input terms \(x_1\) and \(x_2\), local sensitivity analysis (assuming deviations from some reference values) can produce appropriate sensitivity indices (Fig. 3.1 (a)). If, however, factors \(x_1\) and \(x_2\) interact, the local and partial consideration of the space cannot properly account for each factor’s effects on the model response (Fig. 3.1 (b)), as it is only informative at the reference value where it is applied. In contrast, a global sensitivity analysis varies uncertain factors across their entire feasible space of variability (Fig. 3.1 (c)). This approach reveals the global effects of each parameter on the model output, including any interactive effects. For models that cannot be proven linear, global sensitivity analysis is preferred, and this text primarily discusses global sensitivity analysis methods. In the text that follows, whenever we use the term sensitivity analysis we are referring to its global application.

+
+Figure 3.1 + +
+

Fig. 3.1 Treatment of a two-dimensional space of variability by local (panels a-b) and global (panel c) sensitivity analyses. Panels depict contour plots with the value of a model response (\(y\)) changing with changes in the values of input terms \(x_1\) and \(x_2\). Local sensitivity analysis is only an appropriate approach to sensitivity in the case of linear models without interactions between terms, for example in panel (a), where \(y=3x_1+5x_2\). In the case of more complex models, for example in panels (b-c), where \(y=\frac{1}{e^{x_1^2+x_2^2}} + \frac{50}{e^{(0.1x_1)^2+(0.1x_2)^3}}\), local sensitivity will miscalculate sensitivity indices as the assessed changes in the value of \(y\) depend on the assumed base values chosen for \(x_1\) and \(x_2\) (panel (b)). In these cases, global sensitivity methods should be used instead (panel (c)). The points in panel (c) are generated using a uniform random sample of \(n=50\), but many other methods are available.#
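The contrast between the two viewpoints can also be sketched numerically. The snippet below is a minimal illustration using the nonlinear function of Fig. 3.1 (b-c); the domain of \([-2, 2]\) for both factors, the reference point \((0.5, 0.5)\), and the sample size are assumptions made purely for illustration.

```python
import numpy as np

def g(x1, x2):
    # Nonlinear test function of Fig. 3.1 (b-c)
    return 1.0 / np.exp(x1**2 + x2**2) + 50.0 / np.exp((0.1 * x1)**2 + (0.1 * x2)**3)

# Local view: one-at-a-time finite differences around an assumed reference point
x_ref, h = (0.5, 0.5), 1e-6
dg_dx1 = (g(x_ref[0] + h, x_ref[1]) - g(*x_ref)) / h
dg_dx2 = (g(x_ref[0], x_ref[1] + h) - g(*x_ref)) / h

# Global view: sample the full (assumed) domain and inspect the output variability
rng = np.random.default_rng(42)
x1, x2 = rng.uniform(-2.0, 2.0, size=(2, 5000))
y = g(x1, x2)

print(f"local derivatives at {x_ref}: {dg_dx1:.3f}, {dg_dx2:.3f}")
print(f"global output range: [{y.min():.2f}, {y.max():.2f}], variance: {y.var():.2f}")
```

The local derivatives describe behavior only around the chosen reference point, while the global sample exposes how strongly the response varies across the whole domain.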

+
+
+
+
+

3.2. Why Perform Sensitivity Analysis#

+

It is important to understand the many ways in which a sensitivity analysis might be of use to your modeling effort. Most commonly, one might be motivated to perform sensitivity analysis for the following reasons:

+

Model evaluation: Sensitivity analysis can be used to gauge model inferences when assumptions about the structure of the model or its parameterization are dubious or have changed. For instance, consider a numerical model that uses a set of calibrated parameter values to produce outputs, which we then use to inform decisions about the real-world system represented. One might like to know whether small changes in these parameter values significantly change this model’s output and the decisions it informs or if, instead, our parameter inferences yield stable model behavior regardless of the uncertainty present in the specific parameterized processes or properties. This can either discredit or lend credence to the model at hand, as well as any inferences drawn that are founded on its accurate representation of the system. Sensitivity analysis can identify which uncertain model factors are most responsible for such changes in model behavior.

+

Model simplification: Sensitivity analysis can also be used to identify factors or components of the model that appear to have limited effects on direct outputs or metrics of interest. Consider a model that has been developed in an organization for the purposes of a specific research question and is later used in the context of a different application. Some processes represented in significant detail might no longer be of the same importance while consuming significant data or computational resources, as different outputs might be pertinent to the new application. Sensitivity analysis can be used to identify unimportant model components and simplify them to nominal values and reduced model forms. Model complexity and computational costs can therefore be reduced.

+

Model refinement: Alternatively, sensitivity analysis can reveal the factors or processes that are highly influential to the outputs or metrics of interest, by assessing their relative importance. In the context of model evaluation, this can inform which model components warrant additional investigation or measurement so the uncertainty surrounding them and the resulting model outputs or metrics of interest can be reduced.

+

Exploratory modeling: When sufficient credence has been established in the model, sensitivity analysis can be applied to a host of other inquiries. Inferences about the factors and processes that most (or least) control a model’s outputs of interest can be extrapolated to the real system they represent and be used in a heuristic manner to inform model-based inferences. On this foundation, a model paired with the advanced techniques presented in this text can be used to “discover” decision relevant and highly consequential outcomes (i.e., scenario discovery, discussed in more detail in Chapter 4.3 [36, 45]).

+

The nature and context of the model shapes the specific objectives of applying a sensitivity analysis, as well as methods and tools most appropriate and defensible for each application setting [35, 38, 46]. The three most common sensitivity analysis modes (Factor Prioritization, Factor Fixing, and Factor Mapping) are presented below, but the reader should be aware that other uses have been proposed in the literature (e.g., [47, 48]).

+

Factor prioritization: This sensitivity analysis application mode (also referred to as factor ranking) refers to when one would like to identify the uncertain factors that have the greatest impact on the variability of the output, and which, when fixed to their true value (i.e., if there were no uncertainty regarding their value), would lead to the greatest reduction in output variability [49]. Information from this type of analysis can be crucial to model improvement as these factors can become the focus of future measurement campaigns or numerical experiments so that uncertainty in the model output can be reduced. The impact of each uncertain input on the variance of the model output is often used as the criterion for factor prioritization. Fig. 3.2 (a) shows the effects of three uncertain variables (\(X_1\), \(X_2\), and \(X_3\)) on the variance of output \(Y\). \(V(E(Y|X_i))\) indicates the variance in \(Y\) if factor \(X_i\) is left to vary freely while all other factors remain fixed to nominal values. In this case, factor \(X_2\) makes the largest contribution to the variability of output \(Y\) and it should therefore be prioritized. In the context of risk analysis, factor prioritization can be used to reduce output variance to below a given tolerable threshold (also known as variance cutting).

+

Factor fixing: This mode of sensitivity analysis (also referred to as factor screening) aims to identify the model components that have a negligible effect or make no significant contributions to the variability of the outputs or metrics of interest (usually referred to as non-influential [49]). In the stylized example of Fig. 3.2 (a), \(X_1\) makes the smallest contribution to the variability of output \(Y\) suggesting that the uncertainty in its value could be negligible and the factor itself fixed in subsequent model executions. Eliminating these factors or processes in the model or fixing them to a nominal value can help reduce model complexity as well as the unnecessary computational burden of subsequent model runs, results processing, or other sensitivity analyses (the fewer uncertain factors considered, the fewer runs are necessary to illuminate their effects on the output). Significance of the outcome can be gauged in a variety of manners, depending on the application. For instance, if applying a variance-based method, a minimum threshold value of contribution to the variance could be considered as a significance ‘cutoff’, and factors with indices below that value can be considered non-influential. Conclusions about factor fixing should be made carefully, considering all of the effects a factor has, individually and in interaction with other factors (explained in more detail in Chapter 3.4.5).

+

Factor mapping: Finally, factor mapping can be used to pinpoint which values of uncertain factors lead to model outputs within a given range of the output space [49]. In the context of model diagnostics, it is possible that the model’s output changes in ways considered impossible based on the represented processes, or other observed evidence. In this situation, factor mapping can be used to identify which uncertain model factors cause this undesirable model behavior by ‘filtering’ model runs that are considered ‘non-behavioral’ [50, 51, 52]. In Fig. 3.2 (b), region \(B\) of the output space \(Y\) denotes the set of behavioral model outcomes and region \(\bar{B}\) denotes the set of non-behavioral outcomes, resulting from the entirety of input space \(X\). Factor mapping refers to the process of tracing which factor values of input space \(X\) produce the behavioral model outcomes in the output space.

+
+Figure 3.2 + +
+

Fig. 3.2 Factor prioritization, factor fixing and factor mapping settings of sensitivity analysis.#

+
+
+

The language used above reflects a use of sensitivity analysis for model fidelity evaluation and refinement. However, as previously mentioned, when a model has been established as a sufficiently accurate representation of the system, sensitivity analysis can produce additional inferences (i.e., exploratory modeling and scenario discovery). For instance, under the factor mapping use, the analyst can now focus on undesirable system states and discover which factors are most responsible for them: for instance, “population growth of above 25% would be responsible for unacceptably high energy demands”. Factor prioritization and factor fixing can be used to make equivalent inferences, such as “growing populations and increasing temperatures are the leading factors for changing energy demands” (prioritizing of factors) or “changing dietary needs are inconsequential to increasing energy demands for this region” (a factor that can be fixed in subsequent model runs). All these inferences hinge on the assumption that the real system’s stakeholders consider the model states faithful enough representations of system states. As elaborated in Chapter 2.2, this view on sensitivity analysis is founded on a relativist perspective on modeling, which tends to place more value on model usefulness rather than strict accuracy of representation in terms of error. As such, sensitivity analysis performed with decision-making relevance in mind will focus on model outputs or metrics that are consequential and decision relevant (e.g., energy demand in the examples above).

+
+
+

3.3. Design of Experiments#

+

Before conducting a sensitivity analysis, the first element that needs to be clarified is the uncertainty space of the model [51, 53]. In other words, how many and which factors making up the mathematical model are considered uncertain and can potentially affect the model output and the inferences drawn from it. Uncertain factors can be model parameters, model structures, inputs, or alternative model resolution levels (scales), all of which can be assessed through the tools presented in this text. Depending on the kind of factor, its variability can be elicited through various means: expert opinion, values reported in the literature, historical observations, its physical meaning (e.g., population values in a city can never be negative), or through the use of more formal UQ methods (Chapter A). The model uncertainty space represents the entire space of variability present in each of the uncertain factors of a model. The complexity of most real-world models means that the response function, \(y=g(x)\), mapping inputs to outputs, is hardly ever available in an analytical form and therefore analytically computing the sensitivity of the output to each uncertain factor becomes impossible. In these cases, sensitivity analysis is only feasible through numerical procedures that employ different strategies to sample the uncertainty space and calculate sensitivity indices.

+

A sampling strategy is often referred to as a design of experiments and represents a methodological choice made before conducting any sensitivity analysis. Experimental design was first introduced by Fisher [54] in the context of laboratory or field-based experiments. Its application in sensitivity analysis is similar to setting up a physical experiment in that it is used to discover the behavior of a system under specific conditions. An ideal design of experiments should provide a framework for the extraction of all plausible information about the impact of each factor on the output of the model. The design of experiments is used to set up a simulation platform with the minimum computational cost to answer specific questions that cannot be readily drawn from the data through analytical or common data mining techniques. Models representing coupled human-natural systems usually have a large number of inputs, state variables and parameters, but not all of them exert fundamental control over the numerical process, despite their uncertainty, nor have substantial impacts on the model output, either independently or through their interactions. Each factor influences the model output in different ways that need to be discovered. For example, the influence of a parameter on model output can be linear or non-linear and can be continuous or only be active during specific times or at particular states of the system [55, 56]. An effective and efficient design of experiments allows the analyst to explore these complex relationships and evaluate different behaviors of the model for various scientific questions [57]. The rest of this section overviews some of the most commonly used designs of experiments. Table 3.1 summarizes the designs discussed.

+
Table 3.1 Summary of designs of experiments overviewed in this section. * Depends on the sample size.#

Design of experiments                                  | Factor interactions considered  | Treatment of factor domains
One-At-a-Time (OAT)                                    | No - main effects only          | Continuous (distributions)
Full Factorial Sampling                                | Yes - including total effects   | Discrete (levels)
Fractional Factorial Sampling                          | Yes - only lower-order effects* | Discrete (levels)
Latin Hypercube (LH) Sampling                          | Yes - including total effects*  | Continuous (distributions)
Quasi-Random Sampling with Low-Discrepancy Sequences   | Yes - including total effects*  | Continuous (distributions)
+

There are a few different approaches to the design of experiments, closely related to the chosen sensitivity analysis approach, which is in turn shaped by the research motivations, scientific questions, and computational constraints at hand (additional discussion of this can be found at the end of Chapter 3). For example, in a sensitivity analysis using perturbation and derivatives methods, the model input parameters vary from their nominal values one at a time, something that the design of experiments needs to reflect. If, instead, one were to perform sensitivity analysis using a multiple-starts perturbation method, the design of experiments needs to consider that multiple points across the factor space are used. The design of experiments specifically defines two key characteristics of samples that are fed to the numerical model: the number of samples and the range of each factor.

+

Generally, sampling can be performed randomly or by applying a stratifying approach. In random sampling, such as Monte Carlo [58], samples are randomly generated by a pseudo-random number generator with an a priori assumption about the distribution of parameters and their possible ranges. Random seeds can also be used to ensure consistency and greater control over the random process. However, this method can leave gaps in some regions of the parameter space and cause clustering in others, especially for a large number of parameters [59]. Most sampling strategies use stratified sampling to mitigate these disadvantages. Stratified sampling techniques divide the domain of each factor into subintervals, often of equal lengths. From each subinterval, an equal number of samples is drawn randomly, or based on specific locations within the subintervals [49].

+
+

3.3.1. One-At-a-Time (OAT)#

+

In this approach, only one model factor is changed at a time while all others are kept fixed across each iteration in a sampling sequence. The OAT method assumes that the model factors of focus are linearly independent (i.e., there are no interactions) and can analyze how factors individually influence model outputs or metrics of interest. While popular given its ease of implementation, OAT is ultimately limited in its exploration of a model’s sensitivities [49]. It is primarily used with local sensitivity techniques that face similar criticisms: applying this sampling scheme to a system with nonlinear and interactive processes will miss important information on the effect uncertain factors have on the model. OAT sampling can be repeated multiple times in a more sophisticated manner and across different locations of the parameter space to overcome some of these challenges, but doing so increases computational costs and negates the main reasons for its selection. Given these limitations, OAT methods can be used as preliminary, low-cost analyses of the factors’ individual effects, but should ultimately be complemented with more sophisticated methods.
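As a minimal sketch of an OAT design (the nominal values and perturbation sizes below are purely illustrative), each row of the resulting array corresponds to one model run:

```python
import numpy as np

def oat_design(nominal, deltas):
    """One-at-a-time design: a baseline run plus one run per factor,
    where only that factor is perturbed from its nominal value."""
    nominal = np.asarray(nominal, dtype=float)
    runs = [nominal.copy()]
    for i, delta in enumerate(deltas):
        x = nominal.copy()
        x[i] += delta          # perturb factor i, keep all others fixed
        runs.append(x)
    return np.array(runs)      # shape: (number of factors + 1, number of factors)

print(oat_design(nominal=[1.0, 0.5, 10.0], deltas=[0.1, 0.05, 1.0]))
```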

+
+
+

3.3.2. Full and Fractional Factorial Sampling#

+

In full factorial sampling each factor is treated as being discrete by considering two or more levels (or intervals) of its values. The sampling process then generates samples within each possible combination of levels, corresponding to each parameter. This scheme produces a more comprehensive sampling of the factors’ variability space, as it accounts for all candidate combinations of factor levels (Fig. 3.3 (a)). If the number of levels is the same across all factors, the number of generated samples is estimated using \(n^k\), where \(n\) is the number of levels and \(k\) is the number of factors. For example, Fig. 3.3 (a) presents a full factorial sampling of three uncertain factors \((x_1,\) \(x_2,\) and \(x_3)\), each considered as having four discrete levels. The total number of samples necessary for such an experiment is \(4^3=64\). As the number of factors increases, the number of simulations necessary will also grow exponentially, making full factorial sampling computationally burdensome (Fig. 3.3 (b)). As a result, it is common in the literature to apply full factorial sampling at only two levels per factor, typically the two extremes [60]. This significantly reduces computational burden but is only considered appropriate in cases where factors can indeed only assume two discrete values (e.g., when testing the effects of epistemic uncertainty and comparing between model structure A and model structure B). In the case of physical parameters on continuous distributions (e.g., when considering the effects of measurement uncertainty in a temperature sensor), discretizing the range of a factor to only extreme levels can bias its estimated importance.
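A full factorial design can be generated simply by enumerating every combination of levels. The sketch below reproduces the setting of Fig. 3.3 (a), with three factors at four levels each; the specific level values are arbitrary placeholders.

```python
import itertools
import numpy as np

levels = [0.0, 1.0, 2.0, 3.0]             # four discrete levels (illustrative values)
factors = {"x1": levels, "x2": levels, "x3": levels}

# Every combination of one level per factor
design = np.array(list(itertools.product(*factors.values())))
print(design.shape)                        # (64, 3): 4**3 = 64 combinations
```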

+

Fractional factorial sampling is a widely used alternative to full factorial sampling that allows the analyst to significantly reduce the number of simulations by focusing on the main effects of a factor and seeking to avoid model runs that yield redundant response information [49]. In other words, if one can reasonably assume that higher-order interactions are negligible, information about the most significant effects and lower-order interactions (e.g., effects from pairs of factors) can be obtained using a fraction of the full factorial design. Traditionally, fractional factorial design has also been limited to two levels [60], referred to as Fractional Factorial \(2^{k-p}\) designs [61]. Recently, Generalized Fractional Factorial designs have also been proposed that allow for the structured generation of samples at more than two levels per factor [62]. Consider a case where the modeling team dealing with the problem in Fig. 3.3 (a) cannot afford to perform 64 simulations of their model. They can afford 32 runs for their experiment and instead decide to fractionally sample the variability space of their factors. A potential design of such a sampling strategy is presented in Fig. 3.3 (c).

+
+Figure 3.3 + +
+

Fig. 3.3 Alternative designs of experiments and their computational costs for three uncertain factors \((x_1,\) \(x_2,\) and \(x_3)\). (a) Full factorial design sampling of three factors at four levels, at a total of 64 samples; (b) exponential growth of necessary number of samples when applying full factorial design at four levels; (c) fractional factorial design of three factors at four levels, at a total of 32 samples; and (d) Latin Hypercube sample of three factors with uniform distributions, at a total of 32 samples.#

+
+
+
+
+

3.3.3. Latin Hypercube Sampling (LHS)#

+

Latin hypercube sampling (LHS) [63] is one of the most common methods in space-filling experimental designs. With this sampling technique, for \(N\) uncertain factors, an \(N\)-dimensional hypercube is generated, with each factor divided into an equal number of levels depending on the total number of samples to be generated. Equal numbers of samples are then randomly generated at each level, across all factors. In this manner, latin hypercube design guarantees sampling from every level of the variability space and without any overlaps. When the number of samples generated is much larger than the number of uncertain factors, LHS can be very effective in examining the effects of each factor [49]. LHS is an attractive technique, because it guarantees a diverse coverage of the space, through the use of subintervals, without being constrained to discrete levels for each factor - compare Fig. 3.3 (c) with Fig. 3.3 (d) for the same number of samples.

+

LHS is less effective when the number of samples is not much larger than the number of uncertain factors, and the effects of each factor cannot be appropriately distinguished. The samples between factors can also be highly correlated, biasing any subsequent sensitivity analysis results. To address this, the sampling scheme can be modified to control for the correlation in parameters while maximizing the information derived. An example of such modification is through the use of orthogonal arrays [64].
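A minimal Latin hypercube sample can be drawn, for example, with SciPy's quasi-Monte Carlo utilities, mirroring the 32-sample, three-factor design of Fig. 3.3 (d); the factor bounds below are illustrative assumptions.

```python
from scipy.stats import qmc

sampler = qmc.LatinHypercube(d=3, seed=42)     # three uncertain factors
unit_sample = sampler.random(n=32)             # 32 samples in the unit hypercube
# Rescale from [0, 1] to assumed factor bounds
sample = qmc.scale(unit_sample, l_bounds=[0.0, 0.0, 0.0], u_bounds=[1.0, 10.0, 100.0])
print(sample.shape)                            # (32, 3)
```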

+
+
+

3.3.4. Low-Discrepancy Sequences#

+

Low-discrepancy sequences is another sampling technique that employs a pseudo-random generator for Monte Carlo sampling [65, 66]. These quasi-Monte Carlo methods eliminate ‘lumpiness’ across samples (i.e, the presence of gaps and clusters) by minimizing discrepancy across the hypercube samples. Discrepancy can be quantitatively measured using the deviations of sampled points from a uniform distribution [65, 67]. Low-discrepancy sequences ensure that the number of samples in any subspace of the variability hypercube is approximately the same. This is not something guaranteed by Latin Hypercube sampling, and even though its design can be improved through optimization with various criteria, such adjustments are limited to small sample sizes and low dimensions [67, 68, 69, 70, 71]. In contrast, the Sobol sequence [72, 73], one of the most widely used sampling techniques, utilizes the low-discrepancy approach to uniformly fill the sampled factor space. A core advantage of this style of sampling is that it takes far fewer samples (i.e., simulations) to attain a much lower level of error in estimating model output statistics (e.g., the mean and variance of outputs).
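As a brief sketch, SciPy exposes a scrambled Sobol sequence generator alongside a discrepancy measure that can be used to compare the uniformity of alternative designs; the dimensionality, sample size, and seeds below are arbitrary choices for illustration.

```python
from scipy.stats import qmc

d = 3
sobol_points = qmc.Sobol(d=d, scramble=True, seed=42).random_base2(m=5)   # 2**5 = 32 points
lhs_points = qmc.LatinHypercube(d=d, seed=42).random(n=32)

# Lower discrepancy indicates a more uniform filling of the unit hypercube
print("Sobol sequence discrepancy:", qmc.discrepancy(sobol_points))
print("Latin hypercube discrepancy:", qmc.discrepancy(lhs_points))
```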

+
+

Note

+

Put this into practice! Click the following link to try out an interactive tutorial which uses Sobol sequence sampling for the purposes of a Sobol sensitivity analysis: Sobol SA using SALib Jupyter Notebook

+
+
+
+

3.3.5. Other types of sampling#

+

The sampling techniques mentioned so far are general sampling methods useful for a variety of applications beyond sensitivity analysis. There are however techniques that have been developed for specific sensitivity analysis methods. Examples of these methods include the Morris One-At-a-Time [74], Fourier Amplitude Sensitivity Test (FAST; [75]), Extended FAST [76], and Extended Sobol methods [77]. For example, the Morris sampling strategy builds a number of trajectories (usually referred to as repetitions and denoted by \(r\)) in the input space each composed of \(N+1\) factor points, where \(N\) is the number of uncertain factors. The first point of the trajectory is selected randomly and the subsequent \(N\) points are generated by moving one factor at a time by a fixed amount. Each factor is perturbed once along the trajectory, while the starting points of all of the trajectories are randomly and uniformly distributed. Several variations of this strategy also exist in the literature; for more details on each approach and their differences the reader is directed to Pianosi et al. [51].
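The trajectory-building logic described above can be sketched in the unit hypercube as follows. This is a simplified illustration that always steps each factor upward by a fixed \(\Delta\); the library implementations referenced in this section use grid levels and randomized step directions.

```python
import numpy as np

def morris_trajectory(n_factors, delta=0.25, rng=None):
    """One simplified Morris trajectory: a random start followed by N steps,
    each moving a single, randomly ordered factor by +delta."""
    rng = rng or np.random.default_rng()
    x = rng.uniform(0.0, 1.0 - delta, n_factors)   # start low enough that +delta stays in [0, 1]
    points = [x.copy()]
    for i in rng.permutation(n_factors):           # each factor is perturbed exactly once
        x = x.copy()
        x[i] += delta
        points.append(x)
    return np.array(points)                         # shape: (N + 1, N)

r = 4                                               # number of trajectories (repetitions)
trajectories = [morris_trajectory(3, rng=np.random.default_rng(seed)) for seed in range(r)]
print(trajectories[0])
```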

+
+
+

3.3.6. Synthetic generation of input time series#

+

Models often have input time series or processes with strong temporal and/or spatial correlations (e.g., streamflow, energy demand, pricing of commodities, etc.) that, while they might not immediately come to mind as factors to be examined in sensitivity analysis, can be treated as such. Synthetic input time series are used for a variety of reasons, for example, when observations are not available or are limited, or when past observations are not considered sufficiently representative to capture rare or extreme events of interest [78, 79]. Synthetic generation of input time series provides a valuable tool to consider non-stationarity and incorporate potential stressors, such as climate change impacts into input time series [80]. For example, a century of record will be insufficient to capture very high impact rare extreme events (e.g., persistent multi-year droughts). A large body of statistical literature exists focusing on the topics of synthetic weather [81, 82] and streamflow [83, 84] generation that provides a rich suite of approaches for developing history-informed, well-characterized stochastic process models to better estimate rare individual or compound (hot, severe drought) extremes. It is beyond the scope of this text to review these methods, but readers are encouraged to explore the studies cited above as well as the following publications for discussions and comparisons of these methods: [78, 80, 85, 86, 87, 88, 89]. The use of these methods for the purposes of exploratory modeling, especially in the context of well-characterized versus deep uncertainty, is further discussed in Chapter 4.3.

+
+
+
+

3.4. Sensitivity Analysis Methods#

+

In this section, we describe some of the most widely applied sensitivity analysis methods along with their mathematical definitions. We also provide a detailed discussion on applying each method, as well as a comparison of their features and limitations.

+
+

3.4.1. Derivative-based Methods#

+

Derivative-based methods explore how model outputs are affected by perturbations in a single model input around a particular input value. These methods are local and are performed using OAT sampling. For simplicity of mathematical notations, let us assume that the model \(g(X)\) only returns one output. Following [90] and [51], the sensitivity index, \(S_i\) , of the model’s i-th input factor, \(x_i\) , can be measured using the partial derivative evaluated at a nominal value, \(\bar{x}\), of the vector of inputs:

+
+\[S_i (\bar{x}) = \frac{\partial g}{\partial x_i}\bigg|_{\bar{x}} c_i\]
+

where \(c_i\) is a scaling factor. In most applications, however, the relationship \(g(X)\) is not fully known in its analytical form, and therefore the above partial derivative is usually approximated:

+
+\[S_i (\bar{x}) = \frac{g(\bar{x}_1,...\bar{x}_i+\Delta_i,...\bar{x}_N)-g(\bar{x}_1,...\bar{x}_i,...\bar{x}_N)}{\Delta_i}c_i\]
+

Using this approximation, the i-th input factor is perturbed by a magnitude of \(\Delta_i\), and its relative importance is calculated. Derivative-based methods are some of the oldest sensitivity analysis methods as they only require \(N+1\) model evaluations to estimate indices for \(N\) uncertain factors. As described above, being computationally very cheap comes at the cost of not being able to explore the entire input space, but only (local) perturbations to the nominal value. Additionally, as these methods examine the effects of each input factor one at a time, they cannot assess parametric interactions or capture the interacting nature of many real systems and the models that abstract them.
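A minimal sketch of the approximated index is given below, assuming a simple illustrative model, an arbitrary nominal point, and scaling factors \(c_i\) equal to one.

```python
import numpy as np

def model(x):
    # Illustrative model; stands in for g(X)
    return x[0] ** 2 + 3.0 * x[1] + np.sin(x[2])

x_nominal = np.array([1.0, 2.0, 0.5])    # assumed nominal point
delta = 1e-4
y_nominal = model(x_nominal)

# N + 1 model evaluations: the nominal run plus one perturbed run per factor
S = np.empty_like(x_nominal)
for i in range(len(x_nominal)):
    x = x_nominal.copy()
    x[i] += delta
    S[i] = (model(x) - y_nominal) / delta   # c_i assumed equal to 1

print(S)   # approximately [2.0, 3.0, cos(0.5)]
```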

+
+
+

3.4.2. Elementary Effect Methods#

+

Elementary effect (EE) SA methods provide a solution to the local nature of the derivative-based methods by exploring the entire parametric range of each input parameter [91]. However, EE methods still use OAT sampling and do not vary all input parameters simultaneously while exploring the parametric space. The OAT nature of EE methods therefore prevents them from properly capturing the interactions between uncertain factors. EE methods are computationally efficient compared to their All-At-a-Time (AAT) counterparts, making them more suitable when computational capacity is a limiting factor, while still allowing for some inferences regarding factor interactions. The most popular EE method is the Method of Morris [74]. Following the notation by [51], this method calculates global sensitivity using the mean of the EEs (finite differences) of each parameter at different locations:

+
+\[S_i = \mu_i^* = \frac{1}{r}\sum_{j=1}^r EE^j_i = \frac{1}{r}\sum_{j=1}^r \frac{g(\bar{x}_1,...\bar{x}_i+\Delta_i,...\bar{x}_N)-g(\bar{x}_1,...\bar{x}_i,...\bar{x}_N)}{\Delta_i}c_i\]
+

with \(r\) representing the number of sample repetitions (also referred to as trajectories) in the input space, usually set between 4 and 10 [38]. Each \(x_j\) represents the points of each trajectory, with \(j=1,...,r\), selected as described in the sampling strategy for this method, found above. This method also produces the standard deviation of the EEs:

+
+\[\sigma_i = \sqrt{\frac{1}{r}\sum_{j=1}^r(EE_i^j-\frac{1}{r}\sum_{j=1}^r EE^j_i)^2}\]
+

which is a measure of parametric interactions. Higher values of \(\sigma_i\) suggest model responses at different levels of factor \(x_i\) are significantly different, which indicates considerable interactions between that factor and other uncertain factors. The values of \(\mu_i^*\) and \(\sigma_i\) for each factor allow us to draw several different conclusions, illustrated in Fig. 3.4, following the example by [91]. In this example, factors \(x_1\), \(x_2\), \(x_4\), and \(x_5\) can be said to have an influence on the model outputs, with \(x_1\), \(x_4\), and \(x_5\) having some interactive or non-linear effects. Depending on the orders of magnitude of \(\mu_i^*\) and \(\sigma_i\) one can indirectly deduce whether the factors have strong interactive effects; for example, if for a factor \(\sigma_i \ll \mu_i^*\), then the relationship between that factor and the output can be assumed to be largely linear (note that this is still an OAT method and assumptions on factor interactions should be strongly caveated). Extensions of the Method of Morris have also been developed specifically for the purposes of factor fixing and explorations of parametric interactions (e.g., [48, 92, 93]).

+
+Figure 3.4 + +
+

Fig. 3.4 Illustrative results of the Morris Method. Factors \(x_1\), \(x_2\), \(x_4\), and \(x_5\) have an influence on the model outputs, with \(x_1\), \(x_4\), and \(x_5\) having interactive or non-linear effects. Whether or not a factor should be considered influential to the output depends on the output selected and is specific to the research context and purpose of the analysis, as discussed in Chapter 3.2.#
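In practice, the Method of Morris is implemented in several of the toolkits reviewed in Chapter 3.6. The sketch below uses SALib; the factor names, bounds, number of trajectories, and test model are illustrative assumptions.

```python
from SALib.sample.morris import sample as morris_sample
from SALib.analyze import morris

problem = {
    "num_vars": 3,
    "names": ["x1", "x2", "x3"],
    "bounds": [[0.0, 1.0], [0.0, 1.0], [0.0, 1.0]],
}

X = morris_sample(problem, N=10, num_levels=4)          # r = 10 trajectories
Y = X[:, 0] + 2.0 * X[:, 1] ** 2 + X[:, 0] * X[:, 2]    # illustrative model with an interaction

Si = morris.analyze(problem, X, Y, num_levels=4)
print(Si["mu_star"])   # factor ranking signal
print(Si["sigma"])     # interaction / nonlinearity signal
```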

+
+
+
+
+

3.4.3. Regression-based Methods#

+

Regression analysis is one of the oldest ways of investigating parametric importance and sensitivity [38]. Here, we describe some of the most popular regression-based sensitivity indices. One of the main sensitivity indices of this category is the standardized regression coefficient (SRC). To calculate SRC, a linear regression relationship needs to be fitted between the input vector, \(x\), and the model output of interest by using a least-square minimizing method:

+
+\[y = b_0 + \sum_{i=1}^N b_ix_i\]
+

where \(b_0\) and \(b_i\) (corresponding to the i-th model input) are regression coefficients. The following relationship can then be used to calculate the SRCs for different input values:

+
+\[S_i=SRC_i=b_i\frac{\sigma_i}{\sigma_y}\]
+

where \(\sigma_i\) and \(\sigma_y\) are standard deviations of i-th model input and output, respectively.

+

Several other regression-based indices explore the correlation between input and output parameters as a proxy to model parametric sensitivity [91, 94, 95]. The Pearson correlation coefficient (PCC) can be used when a linear relationship exists between an uncertain factor, \(x_i\), and the output \(y\):

+
+\[S_i=PCC=\frac{cov(x_i,y)}{\sigma_i\sigma_y}\]
+

In cases when there are outliers in the data or the relationship between the uncertain factors and the output is not linear, rank-based correlation coefficients are preferred, for example, Spearman’s rank correlation coefficient (SRCC):

+
+\[S_i=SRCC=\frac{cov(rx_i,ry)}{\sigma_{rx_i}\sigma_{ry}}\]
+

where the raw values of \(x_i\) and \(y\) are converted to ranks \(rx_i\) and \(ry\) respectively, which instead represent a measurement of the strength of the monotonic relationship, rather than the linear relationship, between the input and output. Other regression-based metrics include the partial correlation coefficient, the partial rank correlation coefficient, and the Nash-Sutcliffe coefficient, more discussion on which can be found in [39, 91].
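These regression- and correlation-based indices only require a pre-existing sample of inputs and outputs. A brief sketch with NumPy and SciPy follows; the sample and model are illustrative assumptions.

```python
import numpy as np
from scipy import stats

rng = np.random.default_rng(42)
X = rng.uniform(0.0, 1.0, size=(1000, 3))                 # illustrative input sample
y = 2.0 * X[:, 0] + 0.5 * X[:, 1] ** 3 + 0.1 * rng.normal(size=1000)

# Standardized regression coefficients (SRC) from a least-squares linear fit
A = np.column_stack([np.ones(len(y)), X])
b = np.linalg.lstsq(A, y, rcond=None)[0][1:]               # drop the intercept b0
src = b * X.std(axis=0) / y.std()

# Correlation-based indices for each factor
pcc = [stats.pearsonr(X[:, i], y)[0] for i in range(3)]    # linear association
srcc = [stats.spearmanr(X[:, i], y)[0] for i in range(3)]  # monotonic (rank) association

print(src, pcc, srcc)
```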

+

Tree-based regression techniques have also been used for sensitivity analysis in an effort to address the challenges faced with nonlinear models [96]. Examples of these methods include the Patient Rule Induction Method (PRIM; [97]) and Classification And Regression Trees (CART; [98]). CART-based approaches also include boosting and bagging extensions [99, 100]. These methods are particularly useful when sensitivity analysis is used for factor mapping (i.e., when trying to identify which uncertain model factors produce a certain model behavior). Chapter 4.3 elaborates on the use of these methods. Regression-based sensitivity analysis methods are global by nature and can explore the entire space of variables. However, the true level of comprehensiveness depends on the design of experiments and the number of simulations providing data to establish the regression relationships. Although they are usually computationally efficient, they do not produce significant information about parametric interactions [38, 39].

+
+
+

3.4.4. Regional Sensitivity Analysis#

+

Another method primarily applied for basic factor mapping applications is Regional Sensitivity Analysis (RSA; [101]). RSA is a global sensitivity analysis method that is typically implemented using standard sampling methods such as latin hypercube sampling. It is performed by specifying a condition on the output space (e.g., an upper threshold) and classifying outputs that meet the condition as behavioral and the ones that fail it as non-behavioral (illustrated in Fig. 3.2 (b)). Note that the specified threshold depends on the nature of the problem, model, and the research question. It can reflect model-performance metrics (such as errors) or consequential decision-relevant metrics (such as unacceptable system outcomes). The behavioral and non-behavioral outputs are then traced back to their originating sampled factors, where differences between the distributions of samples can be used to determine their significance in producing each part of the output. The Kolmogorov-Smirnov divergence is commonly used to quantify the difference between the distribution of behavioral and non-behavioral parameters [51]:

+
+\[S_i=\left|F_{x_i|y_b} (x_i \mid y \in Y_b)-F_{x_i|y_{nb}} (x_i \mid y \in Y_{nb})\right|\]
+

where \(Y_b\) represents the set of behavioral outputs, and \(F_{x_i|y_b}\) is the empirical cumulative distribution function of the values of \(x_i\) associated with values of \(y\) that belong in the behavioral set. The \(nb\) notation indicates the equivalent elements related to the non-behavioral set. Large differences between the two distributions indicate stronger effects by the parameters on the respective part of the output space.
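A minimal regional sensitivity analysis sketch is shown below, assuming an illustrative sample, model, and behavioral threshold, and using the two-sample Kolmogorov-Smirnov statistic from SciPy as the divergence measure.

```python
import numpy as np
from scipy import stats

rng = np.random.default_rng(42)
X = rng.uniform(0.0, 1.0, size=(2000, 3))                  # illustrative factor sample
y = X[:, 0] ** 2 + 0.2 * X[:, 1]                           # illustrative model output

behavioral = y < 0.3                                        # assumed output condition
for i in range(X.shape[1]):
    ks = stats.ks_2samp(X[behavioral, i], X[~behavioral, i]).statistic
    print(f"x{i + 1}: KS divergence = {ks:.3f}")            # larger -> stronger control on the split
```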

+

Used in a factor mapping setting, RSA can be applied for scenario discovery [102, 103], the Generalized Likelihood Uncertainty Estimation method (GLUE; [18, 104, 105]) and other hybrid sensitivity analysis methods (e.g., [106, 107]). The fundamental shortcomings of RSA are that, in some cases, it could be hard to interpret the difference between behavioral and non-behavioral sample sets, and that insights about parametric correlations and interactions cannot always be uncovered [38]. For more elaborate discussions and illustrations of the RSA method, readers are directed to Tang et al. [42], Saltelli et al. [49], Young [108] and references therein.

+
+
+

3.4.5. Variance-based Methods#

+

Variance-based sensitivity analysis methods hypothesize that various specified model factors contribute differently to the variation of model outputs; therefore, decomposition and analysis of output variance can determine a model’s sensitivity to input parameters [38, 77]. The most popular variance-based method is the Sobol method, a global sensitivity analysis method that takes into account complex and nonlinear factor interactions when calculating sensitivity indices, and employs more sophisticated sampling methods (e.g., the Sobol sampling method). The Sobol method is able to calculate three types of sensitivity indices that provide different types of information about model sensitivities. These indices include first-order, higher-order (e.g., second-, third-, etc. orders), and total-order sensitivities.

+

The first-order sensitivity index indicates the percent of model output variance contributed by a factor individually (i.e., the effect of varying \(x_i\) alone) and is obtained using the following [77, 109]:

+
+\[S_i^1=\frac{V_{x_i}[E_{x_{\sim i}}(y|x_i)]}{V(y)}\]
+

with \(E\) and \(V\) denoting the expected value and the variance, respectively. \(x_{\sim i}\) denotes all factors except for \(x_i\). The first-order sensitivity index (\(S_i^1\)) can therefore also be thought of as the portion of total output variance (\(V(y)\)) that can be reduced if the uncertainty in factor \(x_i\) is eliminated [110]. First-order sensitivity indices are usually used to understand the independent effect of a factor and to distinguish its individual versus interactive influence. For linearly independent factors, it would be expected that they only have first-order effects (no interactions), which should correspond well with sensitivities obtained from simpler methods using OAT sampling.

+

Higher-order sensitivity indices explore the interaction between two or more parameters that contribute to model output variations. For example, a second-order index indicates how interactions between a pair of factors can lead to change in model output variance and is calculated using the following relationship:

+
+\[S_{ij}^2=\frac{V_{x_{i,j}}[E_{x_{\sim i,j}}(y|x_i,x_j)]}{V(y)}\]
+

with \(i \ne j\). Higher-order indices can be calculated by similar extensions (i.e., conditioning on additional factors being fixed together), but this is usually computationally expensive in practice.

+

The total sensitivity index represents the entire influence of an input factor on model outputs, including all of its interactions with other factors [111]. In other words, total-order indices include first-order and all higher-order interactions associated with each factor and can be calculated using the following:

+
+\[S_i^T= \frac{E_{x_{\sim i}}[V_{x_i}(y|x_{\sim i})]}{V(y)} = 1 - \frac{V_{x_{\sim i}}[E_{x_{i}}(y|x_{\sim i})]}{V(y)}\]
+

This index reveals the expected portion of variance that remains if uncertainty is eliminated in all factors but \(x_i\) [110]. The total sensitivity index is the overall best measure of sensitivity as it captures the full individual and interactive effects of model factors.
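First-, second-, and total-order Sobol indices can be estimated, for example, with SALib; the sketch below uses an illustrative three-factor problem and model, and the sample size is an assumed placeholder.

```python
from SALib.sample import saltelli
from SALib.analyze import sobol

problem = {
    "num_vars": 3,
    "names": ["x1", "x2", "x3"],
    "bounds": [[0.0, 1.0]] * 3,
}

X = saltelli.sample(problem, 1024)                      # n * (2d + 2) rows with second-order terms
Y = X[:, 0] + 2.0 * X[:, 1] ** 2 + X[:, 0] * X[:, 2]    # illustrative model with an interaction

Si = sobol.analyze(problem, Y)
print(Si["S1"])     # first-order indices
print(Si["ST"])     # total-order indices
print(Si["S2"])     # second-order (pairwise interaction) indices
```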

+

Besides the Sobol method, there are other variance-based sensitivity analysis methods, such as the Fourier amplitude sensitivity test (FAST; [75, 112]) and extended-FAST [113, 114], that have been used by the scientific community. However, Sobol remains by far the most common method of this class. Variance-based techniques have been widely used and have proved to be powerful in a variety of applications. Despite their popularity, some authors have expressed concerns about the methods’ appropriateness in some settings. Specifically, the presence of heavy-tailed distributions or outliers, or of multimodal model outputs, can bias the sensitivity indices produced by these methods [115, 116, 117]. Moment-independent measures, discussed below, attempt to overcome these challenges.

+
+

Note

+

Put this into practice! Click the following link to try out an interactive tutorial which demonstrates the application of a Sobol sensitivity analysis: Sobol SA using SALib Jupyter Notebook

+
+
+
+

3.4.6. Analysis of Variance (ANOVA)#

+

Analysis of Variance (ANOVA) was first introduced by Fisher and others [118] and has since become a popular factor analysis method in physical experiments. ANOVA can be used as a sensitivity analysis method in computational experiments with a factorial design of experiment (referred to as factorial ANOVA). Note that Sobol can also be categorized as an ANOVA sensitivity analysis method, and that is why Sobol is sometimes referred to as a functional ANOVA [119]. Factorial ANOVA methods are particularly suited for models and problems that have discrete input spaces, significantly reducing the computational time. More information about these methods can be found in [119, 120, 121].

+
+
+

3.4.7. Moment-Independent (Density-Based) Methods#

+

These methods typically compare the entire distribution (i.e., not just the variance) of input and output parameters in order to determine the sensitivity of the output to a particular input variable. Several moment-independent sensitivity analysis methods have been proposed in recent years. The delta (\(\delta\)) moment-independent method calculates the difference between unconditional and conditional cumulative distribution functions of the output. The method was first introduced by [122, 123] and has become widely used in various disciplines. The \(\delta\) sensitivity index is defined as follows:

+
+\[S_i=\delta_i=\frac{1}{2}E_{x_i}\left[\int\left|f_y(y)-f_{y|x_i}(y)\right|dy\right]\]
+

where \(f_y(y)\) is the probability density function of the entire model output \(y\), and \(f_{y|x_i}(y)\) is the conditional density of \(y\), given that factor \(x_i\) assumes a fixed value. The \(\delta_i\) sensitivity indicator therefore represents the normalized expected shift in the distribution of \(y\) provoked by \(x_i\). Moment-independent methods are advantageous in cases where we are concerned about the entire distribution of events, such as when uncertain factors lead to more extreme events in a system [13]. Further, they can be used with a pre-existing sample of data, without requiring a specific sampling scheme, unlike the previously reviewed methods [124]. The \(\delta\) sensitivity index does not include interactions between factors and it is therefore akin to the first order index produced by the Sobol method. Interactions between factors can still be estimated using this method, by conditioning the calculation on more than one uncertain factor being fixed [123].
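Because the method works on a generic sample, it can be applied directly to pre-existing data. The sketch below uses SALib's implementation of the \(\delta\) index with an illustrative sample and model.

```python
import numpy as np
from SALib.analyze import delta

problem = {
    "num_vars": 3,
    "names": ["x1", "x2", "x3"],
    "bounds": [[0.0, 1.0]] * 3,
}

rng = np.random.default_rng(42)
X = rng.uniform(0.0, 1.0, size=(2000, 3))        # any pre-existing sample can be used
Y = X[:, 0] + 2.0 * X[:, 1] ** 2                 # illustrative model output

Si = delta.analyze(problem, X, Y)
print(Si["delta"])                                # moment-independent indices
print(Si["S1"])                                   # first-order Sobol estimates from the same data
```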

+
+
+
+

3.5. How To Choose A Sensitivity Analysis Method: Model Traits And Dimensionality#

+

Fig. 3.5, synthesized from variants found in [51, 91], presents a graphical synthesis of the methods overviewed in this section, with regard to their appropriateness of application based on the complexity of the model at hand and the computational limits on the number of model evaluations afforded. The bars below each method also indicate the sensitivity analysis purposes they are most appropriate to address, which are in turn a reflection of the motivations and research questions the sensitivity analysis is called to address. Computational intensity is measured as a multiple of the number of model factors that are considered uncertain (\(d\)). Increasing model complexity mandates that more advanced sensitivity analysis methods are applied to address potential nonlinearities, factor interactions, and discontinuities. Such methods can only be performed at increasing computational expense. For example, computationally cheap linear regression should not be used to assess factors’ importance if the model cannot be proven linear and the factors independent, because important relationships will invariably be missed (recall the example in Fig. 3.1). When computational limits constrain applications to simplified assumptions and sensitivity techniques, any conclusions should be delivered with clear statements of the appropriate caveats.

+
Figure 3.5
+

Fig. 3.5 Classification of the sensitivity analysis methods overviewed in this section, with regard to their computational cost (horizontal axis), their appropriateness to model complexity (vertical axis), and the purpose they can be used for (colored bars). d: number of uncertain factors considered; ANOVA: Analysis of Variance; FAST: Fourier Amplitude Sensitivity Test; PRIM: Patient Rule Induction Method; CART: Classification and Regression Trees; SRCC: Spearman’s rank correlation coefficient; NSE: Nash–Sutcliffe efficiency; SRC: standardized regression coefficient; PCC: Pearson correlation coefficient. This figure is synthesized from variants found in [51, 91].#

+
+
+

The reader should also be aware that the estimates of computational intensity given here are indicative of magnitude and will vary depending on the sampling technique, the model complexity, and the level of information sought. For example, a Sobol sensitivity analysis typically requires a sample of size \(n(d+2)\) to produce first- and total-order indices, where \(d\) is the number of uncertain factors and \(n\) is a scaling factor, selected ad hoc depending on model complexity [46]. The scaling factor \(n\) is typically set to at least 1000, but it is most appropriately set on the basis of index convergence. In other words, a prudent analyst would perform the analysis several times with increasing \(n\) and observe at what level the indices converge to stable values [125]. That level should then serve as the minimum sample size in subsequent sensitivity analyses of the same system. Furthermore, if the analyst would like to better understand the degrees of interaction between factors, requiring second-order indices, the sample size would have to increase to \(n(2d+2)\) [46].
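A convergence check of this kind can be sketched as a simple loop over increasing base sample sizes, repeated until the indices stop changing appreciably; the example below uses SALib and an illustrative three-factor model.

```python
import numpy as np
from SALib.sample import saltelli
from SALib.analyze import sobol

problem = {"num_vars": 3, "names": ["x1", "x2", "x3"], "bounds": [[0.0, 1.0]] * 3}

def model(X):
    return X[:, 0] + 2.0 * X[:, 1] ** 2 + X[:, 0] * X[:, 2]   # illustrative model

for n in [256, 512, 1024, 2048]:                               # increasing scaling factor n
    X = saltelli.sample(problem, n, calc_second_order=False)   # n * (d + 2) model runs
    Si = sobol.analyze(problem, model(X), calc_second_order=False)
    print(n, np.round(Si["ST"], 3))                            # watch the total-order indices stabilize
```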

+

Another important consideration is that methods that do not require specific sampling schemes can be performed in conjunction with others without requiring additional model evaluations. None of the regression-based methods, for example, require samples of specific structures or sizes, and they can be combined with other methods for complementary purposes. For instance, one could complement a Sobol analysis with an application of CART, using the same data, to address questions relating to factor mapping (e.g., we know factor \(x_i\) is important for a model output, but we would like to also know which of its values specifically push the output to undesirable states). Lastly, comparing results from different methods performed together can be especially useful in model diagnostic settings. For example, [13] used \(\delta\) indices, first-order Sobol indices, and \(R^2\) values from linear regression, all performed on the same factors, to derive insights about the effects of factors on different moments of the output distribution and about the linearity of their relationships.
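As a sketch of such a pairing (with illustrative data and threshold), a classification tree fitted to the same input-output sample used for a variance-based analysis can expose which factor values push the output into an undesirable region; here scikit-learn's CART implementation is assumed.

```python
import numpy as np
from sklearn.tree import DecisionTreeClassifier, export_text

rng = np.random.default_rng(42)
X = rng.uniform(0.0, 1.0, size=(2000, 3))               # reuse an existing sensitivity sample
y = X[:, 0] + 2.0 * X[:, 1] ** 2 + X[:, 0] * X[:, 2]    # illustrative model output

undesirable = y > 2.0                                    # assumed threshold of concern
tree = DecisionTreeClassifier(max_depth=2).fit(X, undesirable)
print(export_text(tree, feature_names=["x1", "x2", "x3"]))   # rules mapping factor values to outcomes
```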

+
+
+

3.6. Software Toolkits#

+

This section presents available open source sensitivity analysis software tools, organized by the programming language they use and the methods they support (Fig. 3.6). Our review covers five widely used programming languages: R, MATLAB, Julia, Python, and C++, as well as one tool that provides a graphical user interface (GUI). Each available SA tool was assessed on the number of SA methods and design of experiments methods it supports. For example, the sensobol package in R only supports the variance-based Sobol method; however, it is the only package we came across that calculates third-order interactions among parameters. At the other end of the spectrum, there are SA software packages that contain several popular SA methods. For example, SALib in Python [126] supports seven different SA methods. The DifferentialEquations package is a comprehensive package developed for Julia, and GlobalSensitivityAnalysis is another Julia package that has mostly adapted SALib methods. Fig. 3.6 also identifies the SA packages that have been updated since 2018, indicating active support and development.

+
Figure 3.6
+

Fig. 3.6 Sensitivity analysis packages available in different programming language platforms (R, Python, Julia, MATLAB, and C++), with the number of methods they support. Packages supporting more than five methods are indicated in pink. Packages updated since 2018 are indicated with asterisks.#

+
+
+
+

Note

+

The following articles are suggested as fundamental reading for the information presented in this section:

+
  • Wagener, T., Pianosi, F., 2019. What has Global Sensitivity Analysis ever done for us? A systematic review to support scientific advancement and to inform policy-making in earth system modelling. Earth-Science Reviews 194, 1–18. https://doi.org/10.1016/j.earscirev.2019.04.006

  • Pianosi, F., Beven, K., Freer, J., Hall, J.W., Rougier, J., Stephenson, D.B., Wagener, T., 2016. Sensitivity analysis of environmental models: A systematic review with practical workflow. Environmental Modelling & Software 79, 214–232. https://doi.org/10.1016/j.envsoft.2016.02.008

The following articles can be used as supplemental reading:

+
  • Saltelli, A., Ratto, M., Andres, T., Campolongo, F., Cariboni, J., Gatelli, D., Saisana, M., Tarantola, S., 2008. Global Sensitivity Analysis: The Primer, 1st edition. Wiley-Interscience, Chichester, England; Hoboken, NJ.

  • Montgomery, D.C., 2017. Design and analysis of experiments. John Wiley & Sons.

  • Iooss, B., Lemaître, P., 2015. A Review on Global Sensitivity Analysis Methods, in: Dellino, G., Meloni, C. (Eds.), Uncertainty Management in Simulation-Optimization of Complex Systems: Algorithms and Applications, Operations Research/Computer Science Interfaces Series. Springer US, Boston, MA, pp. 101–122. https://doi.org/10.1007/978-1-4899-7547-8_5
+
+
+ + \ No newline at end of file diff --git a/dev/docs/html/4.1_understanding_errors_what_is_controlling_model_performance.html b/dev/docs/html/4.1_understanding_errors_what_is_controlling_model_performance.html new file mode 100644 index 0000000..90f9490 --- /dev/null +++ b/dev/docs/html/4.1_understanding_errors_what_is_controlling_model_performance.html @@ -0,0 +1,467 @@ + + + + + + + + + + + Understanding Errors: What Is Controlling Model Performance? — Addressing Uncertainty in MultiSector Dynamics Research documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

Understanding Errors: What Is Controlling Model Performance?#

+

Sensitivity analysis is a diagnostic tool for reconciling model outputs with observed data. It is helpful for clarifying how and under what conditions modeling choices (structure, parameterization, data inputs, etc.) propagate through model components and manifest in their effects on model outputs. This exploration is performed through carefully designed sampling of multiple combinations of input parameters and subsequent evaluation of the model structures that emerge as controlling factors. Model structure and parameterization are two of the most commonly explored aspects of models and have been a central focus when evaluating their performance relative to available observations [17]. Addressing these issues plays an important role in establishing credibility in model predictions, particularly in the positivist natural sciences literature. Traditional model evaluations compare the model with observed data and then rely on expert judgements of its acceptability based on the closeness between simulation and observation for one or a small number of selected metrics. This approach can be myopic, as it is often impossible to use one metric to attribute a certain error and to link it with different parts of the model and its parameters [127]. This means that, even when the error or fitting measure between the model estimates and observations is very small, it is not guaranteed that all the components in the model accurately represent the conceptual reality that the model is abstracting: propagated errors in different parts of the model might cancel each other out, or multiple parameterized implementations of the model can yield similar performance (i.e., equifinality [17]).

+

The inherent complexity of a system hinders accepting or rejecting a model based on one performance measure, and different types of measures can aid in evaluating the various components of a model as essentially a multiobjective problem [25, 26]. In addition, the natural systems mimicked by models contain various interacting components that might act differently across spatial and temporal domains [51, 55]. This heterogeneity is lost when a single performance measure is used, as a high-dimensional and interactive system becomes aggregated through the averaging of spatial or temporal output errors [15]. Therefore, diagnostic error analyses should consider multiple error signatures across different scales and states of concern when seeking to understand how model performance relates to observed data (Fig. 4.1). Diverse error signatures can be used to measure the consistency of underlying processes and behaviors of the model and to evaluate the dynamics of model controls under changing temporal and spatial conditions [128]. Within this framework, even minimal information extracted from the data can be beneficial, as it helps us unearth structural inadequacies in the model. In this context, the proper selection of measures of model performance, and the number of measures used, can play consequential roles in our understanding of the model and its predictions [129].

+

As discussed earlier, instead of the traditional focus on a deterministic prediction that results in a single error measure, many plausible states and spaces can be searched to make different inferences and quantify uncertainties. This process also requires estimates of prior probability distributions for all the important parameters and quantification of model behavior across the input space. One strategy to reduce the search space is to filter out model alternatives that are not consistent with observations and known system behaviors. Those implausible parts of the search space can be referred to as non-physical or non-behavioral alternatives [50, 104]. This step is conducted before the Bayesian calibration exercise (see Chapter A).

+

A comprehensive model diagnostic workflow typically entails the components demonstrated in Fig. 4.1. The workflow begins with the selection of model input parameters and their plausible ranges. After the parameter selection, we need to specify the design of experiments (Chapter 3.3) and the sensitivity analysis method (Chapter 3.4) to be used. As previously discussed, these methods require different numbers of model simulations, and each method provides different insights into the direct effects and interactions of the uncertain factors. In addition, the simulation time of the model and the available computational resources are two of the primary considerations that influence these decisions. After identifying the appropriate methods, we generate a matrix of input parameters, where each set of input parameters will be used to conduct a model simulation. The model can include one or more output variables that fluctuate in time and space. The next step is to analyze model performance by comparing model outputs with observations. As discussed earlier, the positivist model evaluation paradigm focuses on a single model performance metric (error), leading to a loss of information about model parameters and the suitability of the model’s structure. However, a thorough investigation of the temporal and spatial signatures of model outputs using various performance metrics or time- and space-varying sensitivity analyses can shed more light on the fitness of each parameter set and the model’s internal structure. This analysis provides diagnostic feedback on the importance and range of model parameters and can guide further improvement of the model algorithm.
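As a rough sketch of what the evaluation step of this workflow might look like in code, the snippet below scores every member of a parameter ensemble against an observed series using several error signatures rather than a single aggregate metric; simulate is a placeholder for whatever model is being diagnosed, and the metric choices (RMSE, Nash-Sutcliffe efficiency, bias) are illustrative.

```python
import numpy as np

def rmse(sim, obs):
    return np.sqrt(np.mean((sim - obs) ** 2))

def nse(sim, obs):
    # Nash-Sutcliffe efficiency: 1 is a perfect fit, values below 0 are worse than the observed mean
    return 1.0 - np.sum((sim - obs) ** 2) / np.sum((obs - obs.mean()) ** 2)

def evaluate_ensemble(param_sets, simulate, obs):
    """Score every parameter set with several error signatures, not just one."""
    scores = []
    for p in param_sets:
        sim = simulate(p)  # placeholder: one model run for one parameter set
        scores.append({
            "params": p,
            "rmse": rmse(sim, obs),
            "nse": nse(sim, obs),
            "bias": float(np.mean(sim - obs)),
        })
    return scores
```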

+
Figure 4.1
+

Diagnostic evaluation of model fidelity using sensitivity analysis methods.#

+
+
+
+

Note

+

Put this into practice! Click the following badge to try out an interactive tutorial on implementing a time-varying sensitivity analysis of HYMOD model parameters: HYMOD Jupyter Notebook

+
+
\ No newline at end of file
diff --git a/dev/docs/html/4.2_consequential_dynamics_what_is_controlling_model_behaviors_of_interest.html b/dev/docs/html/4.2_consequential_dynamics_what_is_controlling_model_behaviors_of_interest.html
new file mode 100644
index 0000000..9752ad5
--- /dev/null
+++ b/dev/docs/html/4.2_consequential_dynamics_what_is_controlling_model_behaviors_of_interest.html

Consequential Dynamics: What is Controlling Model Behaviors of Interest?#

+

Consequential changes in dynamic systems can take many forms, but most dynamic behavior can be categorized into a few basic patterns. Feedback structures inherent in a system, be they positive or negative, generate these patterns, which for the simplest systems fall into three groups: exponential growth, goal seeking, and oscillation (Fig. 4.2). A positive (or self-reinforcing) feedback gives rise to exponential growth, a negative (or self-correcting) feedback gives rise to a goal-seeking mode, and negative feedbacks with time delays give rise to oscillatory behavior. Nonlinear interactions between the system’s feedback structures can give rise to more complex dynamic behavior modes, examples of which are also shown in Fig. 4.2, adapted from Sterman [130].

+
Figure 4.2
+

Common modes of behavior in dynamic systems, occurring based on the presence of positive and negative feedback relationships, and linear and non-linear interactions. Adapted from Sterman [130].#

+
+
+

The nature of feedback processes in a dynamic system shapes its fundamental behavior: positive feedbacks generate their own growth, while negative feedbacks self-limit, seeking balance and equilibrium. In this manner, feedback processes give rise to different regimes, several of which may be present within each mode of behavior. Consider a population of mammals growing exponentially until it reaches the carrying capacity of its environment (referred to as S-shaped growth). When the population is growing exponentially, the regime is dominated by positive feedback relationships that reinforce its growth. As the population approaches its carrying capacity limit, negative feedback structures begin to dominate, counteracting the growth and establishing a stable equilibrium. Shifts between regimes can be thought of as tipping points, mathematically defined as unstable equilibria, where the presence of positive feedbacks amplifies disturbances and moves the system to a new equilibrium point. In the case of stable equilibria, the presence of negative feedbacks dampens any small disturbance and maintains the system in a stable state. As different feedback relationships govern each regime, different factors (those making up each feedback mechanism) are activated and shape the states the system is found in, as well as define the points of equilibria.
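To make the regime shift concrete, the small sketch below steps a discrete logistic growth model forward: early on the self-reinforcing feedback dominates and growth is near-exponential, while near the carrying capacity the self-correcting feedback takes over and the trajectory levels off into the S-shape described above. Parameter values are illustrative only.

```python
import numpy as np

def logistic_growth(p0=10.0, r=0.3, carrying_capacity=1000.0, n_steps=50):
    """Discrete logistic growth: dP = r * P * (1 - P / K)."""
    pop = np.empty(n_steps)
    pop[0] = p0
    for t in range(1, n_steps):
        growth = r * pop[t - 1] * (1.0 - pop[t - 1] / carrying_capacity)
        pop[t] = pop[t - 1] + growth
    return pop

population = logistic_growth()
# Early steps: near-exponential growth (positive feedback dominates);
# later steps: growth slows as the population approaches the carrying capacity.
print(population[:5], population[-5:])
```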

+

For simple stylized models with a small number of states, system dynamics analysis can analytically derive these equilibria, the conditions for their stability, and the factors determining them. Performing this analysis, however, becomes significantly more challenging for systems that attempt to more closely resemble real complex systems. We argue this is the case for several reasons. First, besides generally exhibiting complex nonlinear dynamics, real-world systems are also made up of larger numbers of interacting elements, which often makes the analytic derivation of system characteristics intractable [131, 132]. Second, human-natural systems temporally evolve and transform when human state-aware action is present. Consider, for instance, humans recreationally hunting the aforementioned population of mammals. Humans act based on the mammal population levels by enforcing hunting quotas, establishing protected territories, or eliminating other predators. The mammal population reacts in response, giving rise to ever-changing state-action-consequence feedbacks, the path dependencies of which become difficult to diagnose and understand (e.g., [133]). Trying to simulate the combination of these two challenges (large numbers of state-aware agents interacting with a natural resource and with each other) produces intractable models that require advanced heuristics to analyze their properties and establish useful inferences.

+

Sensitivity analysis paired with exploratory modeling methods offers a promising set of tools to address these challenges. We present a simple demonstrative application based on Quinn et al. [134]. This stylized example was first developed by Carpenter et al. [135] and represents a town that must balance its agricultural and industrial productivity with the pollution it creates in a downstream lake. Increased productivity allows for increased profits, which the town aims to maximize, but it also produces more pollution for the lake. Too much phosphorus pollution can cause irreversible eutrophication, a process known as “tipping” the lake. The model of phosphorus in the lake \(X_t\) at time \(t\) is governed by:

+
+\[X_{t+1}= X_{t}+a_{t}+\frac{X_{t}^q} {1+X_{t}^q}-bX_t+\varepsilon\]
+

where \(a_t \in [0,0.1]\) is the town’s pollution release at each timestep, \(b\) is the natural decay rate of phosphorus in the lake, \(q\) defines the lake’s recycling rate (primarily through sediments), and \(\varepsilon\) represents uncontrollable natural inflows of pollution modeled as a log-normal distribution with a given mean, \(\mu\), and standard deviation \(\sigma\).
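The difference equation above can be stepped forward directly. The sketch below simulates a single realization, assuming illustrative values for \(b\), \(q\), \(\mu\), and \(\sigma\) and a constant release policy; the function and variable names are ours and are not taken from the original studies' code.

```python
import numpy as np

def simulate_lake(a, b=0.42, q=2.0, mu=0.03, sigma=0.003, n_years=100, seed=42):
    """Step the lake phosphorus difference equation forward for one realization."""
    rng = np.random.default_rng(seed)
    # Log-normal natural inflows with the given real-space mean and standard deviation
    ln_sigma = np.sqrt(np.log(1.0 + (sigma / mu) ** 2))
    ln_mu = np.log(mu) - 0.5 * ln_sigma ** 2
    eps = rng.lognormal(ln_mu, ln_sigma, n_years)

    X = np.zeros(n_years + 1)
    for t in range(n_years):
        recycling = X[t] ** q / (1.0 + X[t] ** q)
        X[t + 1] = X[t] + a[t] + recycling - b * X[t] + eps[t]
    return X

# A constant, mid-range release (a_t in [0, 0.1]) as a stand-in for an actual policy
releases = np.full(100, 0.05)
trajectory = simulate_lake(releases)
```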

+

Panels (a-c) in Fig. 4.3 plot the fluxes of phosphorus into the lake versus the mass accumulation of phosphorus in the lake. The red line corresponds to the phosphorus sinks in the lake (natural decay), given by \(bX_t\). The grey shaded area represents the lake’s phosphorus recycling flux, given by \(\frac{X_{t}^q} {1+X_{t}^q}\). The points of intersection indicate the system’s equilibria, two of which are stable, and one is unstable (also known as the tipping point). The stable equilibrium in the bottom left of the figure reflects an oligotrophic lake, whereas the stable equilibrium in the top right represents a eutrophic lake. With increasing phosphorus values, the tipping point can be crossed, and the lake will experience irreversible eutrophication, as the recycling rate would exceed the removal rate even if the town’s pollution became zero. In the absence of anthropogenic and natural inflows of pollution in the lake (\(a_t\) and \(\varepsilon\) respectively), the area between the bottom-left black point and the white point in the middle can be considered as the safe operating space, before emission levels cross the tipping point.
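With \(a_t = \varepsilon = 0\), the non-zero equilibria in panel (a) are simply the intersections of the removal line \(bX\) with the recycling curve \(\frac{X^q}{1+X^q}\), and they can be located numerically. Below is a short sketch with SciPy, using the \(b=0.42\) and \(q=2\) values from the figure caption; the bracketing intervals are chosen by inspection of the curves.

```python
from scipy.optimize import brentq

b, q = 0.42, 2.0

def net_flux(X):
    # Recycling inflow minus natural decay; its roots are equilibria (X = 0 is also one)
    return X**q / (1.0 + X**q) - b * X

tipping_point = brentq(net_flux, 0.1, 1.0)   # unstable equilibrium
eutrophic_eq = brentq(net_flux, 1.0, 3.0)    # stable eutrophic equilibrium
print(f"tipping point ~ {tipping_point:.3f}, eutrophic equilibrium ~ {eutrophic_eq:.3f}")
```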

+
Figure 4.3
+

Fluxes of phosphorus with regard to the mass of phosphorus in the lake and sensitivity analysis results, assuming \(b=0.42\) and \(q=2\). (a) Fluxes of phosphorus assuming no emissions policy and no natural inflows. (b-c) Fluxes of phosphorus when applying two different emissions policies. The “Best economic policy” and the “Most reliable policy” have been identified by Quinn et al. [134] and can be found at Quinn [136]. (d) Results of a sensitivity analysis on the parameters of the model most consequential to the reliability of the “Most reliable policy”. The code to replicate the sensitivity analysis can be found at Hadka [137]. Panels (a-c) are used courtesy of Julianne Quinn, University of Virginia.#

+
+
+

The town has identified two potential policies that can be used to manage this lake, one that maximizes its economic profits (“best economic policy”) and one that maximizes the time below the tipping point (“most reliable policy”). Panels (b-c) in Fig. 4.3 add the emissions from these policies to the recycling flux and show how the equilibria points shift as a result. In both cases the stable oligotrophic equilibrium increases and the tipping point decreases, narrowing the safe operating space [131, 138]. The best economic policy results in a much narrower space of action, with the tipping point very close to the oligotrophic equilibrium. The performance of both policies depends significantly on the system parameters. For example, a higher value of \(b\), the natural decay rate, would shift the red line upward, moving the equilibria points and widening the safe operating space. Inversely, a higher value of \(q\), the lake’s recycling rate, would shift the recycling line upward, moving the tipping point lower and decreasing the safe operating space. The assumptions under which these policies were identified are therefore critical to their performance and any potential uncertainty in the parameter values could be detrimental to the system’s objectives being met.

+

Sensitivity analysis can be used to clarify the role these parameters play in policy performance. Fig. 4.3 (d) shows the results of a Sobol sensitivity analysis on the reliability of the “most reliable” policy in a radial convergence diagram. The significance of each parameter is indicated by the size of the circles corresponding to it. The size of the interior dark circle indicates the parameter’s first-order effects and the size of the exterior circle indicates the parameter’s total-order effects. The thickness of the line between two parameters indicates the extent of their interaction (second-order effects). In this case, parameters \(b\) and \(q\) appear to have the most significant effects on the system, followed by the mean, \(\mu\), of the natural inflows. All these parameters function in a manner that shifts the location of the three equilibria, and therefore policies that are identified ignoring this parametric uncertainty might fail to meet their intended goals.
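For orientation, the sketch below shows how the quantities behind such a radial convergence plot could be computed with SALib, assuming a hypothetical policy_reliability wrapper standing in for a simulation of the “most reliable” policy; the factor ranges and the stub's return value are illustrative and are not those of the original study.

```python
import numpy as np
from SALib.sample import saltelli
from SALib.analyze import sobol

# Illustrative ranges for the lake model's uncertain factors
problem = {
    "num_vars": 4,
    "names": ["b", "q", "mu", "sigma"],
    "bounds": [[0.1, 0.45], [2.0, 4.5], [0.01, 0.05], [0.001, 0.005]],
}

def policy_reliability(b, q, mu, sigma):
    # Stand-in for simulating the "most reliable" policy and returning the fraction of
    # time steps spent below the tipping point; a real study would wrap the lake model here.
    return float(b / (b + 1.0) + 0.1 / q)

X = saltelli.sample(problem, 1024)
Y = np.array([policy_reliability(*row) for row in X])
Si = sobol.analyze(problem, Y)

first_order = Si["S1"]   # inner circles of the radial convergence plot
total_order = Si["ST"]   # outer circles
second_order = Si["S2"]  # line thicknesses (pairwise interactions)
```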

+

It is worth mentioning that current sensitivity analysis methods are somewhat challenged in addressing several system dynamics analysis questions. The fundamental reason is that sensitivity analysis methods and tools have been developed to gauge numerical sensitivity of model output to changes in factor values. This is natural, as most simulation studies (e.g., all aforementioned examples) have been traditionally concerned with this type of sensitivity. In system dynamics modeling, however, a more important and pertinent concern is changes between regimes or between behavior modes (also known as bifurcations) as a result of changes in model factors [130, 139]. This poses two new challenges. First, identifying a change in regime depends on several characteristics besides a change in output value, like the rate and direction of change. Second, behavior mode changes are qualitative and discontinuous, as equilibria change in stability but also move in and out of existence.

+

Despite these challenges, recent advanced sensitivity analysis methods can help illuminate which factors in a system are most important in shaping boundary conditions (tipping points) between different regimes and determining changes in behavior modes. Reviewing such methods is outside the scope of this text, but the reader is directed to the examples of Eker et al. [22] and Hadjimichael et al. [133], who apply parameterized perturbation to the functional relationships of a system to study the effects of model structural uncertainty on model outputs and bifurcations, and Hekimoğlu and Barlas [139] and Steinmann et al. [140] who, following wide sampling of uncertain inputs, cluster the resulting time series into modes of behavior and identify the most important factors for each.

+
+

Note

+

Put this into practice! Click the following badge to try out an interactive tutorial on performing a sensitivity analysis to discover consequential dynamics: Factor Discovery Jupyter Notebook

+
+
\ No newline at end of file
diff --git a/dev/docs/html/4.3_consequential_scenarios_what_is_controlling_consequential_outcomes.html b/dev/docs/html/4.3_consequential_scenarios_what_is_controlling_consequential_outcomes.html
new file mode 100644
index 0000000..4efe3ff
--- /dev/null
+++ b/dev/docs/html/4.3_consequential_scenarios_what_is_controlling_consequential_outcomes.html

Consequential Scenarios: What is Controlling Consequential Outcomes?#

+

As overviewed in Chapter 2.2, most models are abstractions of systems in the real world. When sufficient confidence has been established in a model, it can then act as a surrogate for the actual system, in that the consequences of potential stressors, proposed actions, or other changes can be evaluated by computer model simulations [36]. A model simulation then represents a computational experiment, which can be used to assess how the modeled system would behave should the various changes come to be. Steven Bankes coined the term exploratory modeling to describe the use of large sets of such computational experiments to investigate their implications for the system. Fig. 4.4 presents a typical workflow of an exploratory modeling application. Exploratory modeling approaches typically use sampling designs to generate large ensembles of states that represent combinations of changes happening together, spanning the entire range of potential values a factor might take (indicated in Fig. 4.4 by numbers 2-5). This perspective on modeling is particularly relevant to studies making long-term projections into the future.
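Below is a sketch of the ensemble-generation step (numbers 2-5 in Fig. 4.4) using a Latin hypercube design from SciPy; the uncertain factors and ranges listed are purely illustrative stand-ins for whatever a given study treats as deeply uncertain.

```python
from scipy.stats import qmc

# Illustrative deeply uncertain factors and their ranges
names = ["energy_demand_growth", "ag_land_use_change", "economic_growth", "streamflow_multiplier"]
lower = [0.00, -0.10, 0.00, 0.7]
upper = [0.30,  0.30, 0.05, 1.3]

sampler = qmc.LatinHypercube(d=len(names), seed=1)
unit_sample = sampler.random(n=1000)                    # 1000 states of the world in [0, 1]^d
states_of_the_world = qmc.scale(unit_sample, lower, upper)

# Each row is one computational experiment to run through the model
print(states_of_the_world.shape)  # (1000, 4)
```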

+
Figure 4.4
+

A typical exploratory modeling workflow#

+
+
+

In the long-term policy analysis literature, exploratory modeling has prominently placed itself as an alternative to traditional narrative scenario or assumptions-based planning approaches, in what can be summarized as the following two-pronged critique [36, 141, 142]. The most prevalent criticism is that the future and how it might evolve are both highly complex and deeply uncertain. Despite its benefits for interpretation and intuitive appeal, a small number of scenarios invariably misses many other potential futures that were not selected as sufficiently representative. This is especially the case for aggregate, narrative scenarios that describe simultaneous changes in multiple sectors together (e.g., “increased energy demand, combined with high agricultural land use and large economic growth”), such as the emission scenarios produced by the Intergovernmental Panel on Climate Change [143]. The bias introduced by this reduced set of potential changes can skew inferences drawn from the model, particularly when the original narrative scenarios are focused on a single or narrow set of measures of system behavior.

+

The second main criticism of traditional narrative scenario-based planning methods is that they provide no systematic way to distinguish which of the constituent factors lead to the undesirable consequences produced by a scenario. Narrative scenarios (e.g., the scenario matrix framework of RCPs-SSPs-SPAs; [144]) encompass multiple changes happening together, selected to span the range of potential changes, but are not typically generated in a systematic factorial manner that considers the multiple ways the factors can be combined. This has two critical limitations. It obfuscates the role each component factor plays in the system, both in isolation and in combination with others (e.g., “is it the increased energy demand or the high agricultural land use that causes unbearable water stress?”). It also renders it nearly impossible to delineate how much change in a factor is critical. Consider, for example, narrative scenario A with a 5% increase in energy demand, and scenario B with a 30% increase in energy demand, which would have dire consequences. At which point between 5% and 30% do the dire consequences actually begin to occur? Such questions cannot be answered without a wide exploration of the space of potential changes. It should be noted that for some levels of model complexity and computational demands (e.g., global-scale models) there is little feasible recourse beyond the use of narrative scenarios.

+

Exploratory modeling is typically paired with scenario discovery methods (indicated by number 9 in Fig. 4.4) that identify which of the scenarios (also known as states of the world) generated indeed have consequences of interest for stakeholders and policy makers, in an approach referred to as ensemble-based scenario-discovery [45, 102, 103]. This approach therefore flips the planning analysis from one that attempts to predict future system conditions to one that attempts to discover the (un)desirable future conditions. Ensemble-based scenario discovery can thus inform what modeling choices yield the most consequential behavioral changes or outcomes, especially when considering deeply uncertain, scenario-informed projections [9, 145]. The relative likelihoods and relevance of the discovered scenarios can be subsequently evaluated by the practitioners a posteriori, within a richer context of knowing the wider set of potential consequences [146]. This can include changing how an analysis is framed (number 10 in Fig. 4.4). For instance, one could initially focus on ensemble modeling of vulnerability using a single uncertain factor that is assumed to be well characterized by historical observations (e.g., streamflow; this step is represented by numbers 2-3 in Fig. 4.4). The analysis can then shift to include projections of more factors treated as deeply uncertain (e.g., urbanization, population demands, temperature, and snow-melt) to yield a far wider space of challenging projected futures. UC experiments contrasting these two framings can be highly valuable for tracing how vulnerability inferences change as the modeled space of futures expands from the historical baseline [134].

+

An important nuance to be clarified here is that the focus or purpose of a modeling exercise plays a major role in whether a given factor of interest is considered well-characterized or deeply uncertain. Take the example context of characterizing temperature or streamflow extremes, where for each state variable of interest for a given location of focus there is a century of historical observations. Clearly, the observation technologies will have evolved over time uniquely for temperature and streamflow measurements and they likely lack replicate experiments (data uncertainty). A century of record will be insufficient to capture very high impact and rare extreme events (i.e., increasingly poor structural/parametric inference for the distributions of specific extreme single or compound events). The mechanistic processes as well as their evolving variability will be interdependent but uniquely different for each of these state variables. A large body of statistical literature exists focusing on the topics of synthetic weather [81, 82] or streamflow [83, 84] generation that provides a rich suite of approaches for developing history-informed, well-characterized stochastic process models to better estimate rare individual or compound extremes. These history-focused approaches can be viewed as providing well-characterized quantifications of streamflow or temperature distributions; however, they do not capture how coupled natural-human processes can fundamentally change their dynamics when transitioning to projections of longer-term futures (e.g., streamflow and temperature in 2055). Consequently, changing the focus of the modeling to making long term projections of future streamflow or temperature now makes these processes deeply uncertain.
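As a concrete, if highly simplified, illustration of such history-informed stochastic generation, the sketch below fits a lag-1 autoregressive model to the log of an observed record and generates a long synthetic trace; it is a toy version of the idea and not a substitute for the cited weather and streamflow generators.

```python
import numpy as np

def synthetic_flows(observed, n_years=1000, seed=7):
    """Generate synthetic flows with an AR(1) model fit to log-transformed observations."""
    rng = np.random.default_rng(seed)
    log_obs = np.log(observed)
    mu, sigma = log_obs.mean(), log_obs.std()
    rho = np.corrcoef(log_obs[:-1], log_obs[1:])[0, 1]   # lag-1 autocorrelation

    z = np.empty(n_years)
    z[0] = rng.normal()
    for t in range(1, n_years):
        z[t] = rho * z[t - 1] + np.sqrt(1.0 - rho**2) * rng.normal()
    return np.exp(mu + sigma * z)   # back-transform to real-space flows

# Example with a made-up observed record standing in for a century of measurements
observed_record = np.exp(np.random.default_rng(0).normal(6.0, 0.4, size=100))
flows = synthetic_flows(observed_record)
```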

+

Scenario discovery methods (number 9 in Fig. 4.4) can be qualitative or quantitative, and they generally attempt to distinguish futures in which a system, or proposed policies to manage the system, meet or miss their goals [103]. The emphasis placed by exploratory modeling on model outputs that have decision-relevant consequences represents a shift toward a broader class of metrics that are reflective of the stakeholders’ concerns, agency, and preferences (also discussed in Chapter 2.2). As a result, sensitivity analysis and scenario discovery methods in this context are applied to performance metrics that go beyond model error and are instead focused on broader metrics such as the resilience of a sector, the reliability of a process, or the vulnerability of a population in the face of uncertainty. In the exploratory modeling literature, this metric is most typically, but not always, a measure of robustness (number 8 in Fig. 4.4). Robustness is a property of a system or a design choice capturing its insensitivity to uncertainty and can be measured via a variety of means, most recently reviewed by Herman et al. [147] and McPhail et al. [129].

+

Scenario discovery is typically performed through the use of algorithms applied to large databases of model runs, generated through exploratory modeling, with each model run representing the performance of the system in one potential state of the world. The algorithms seek to identify the combinations of factor values (e.g., future conditions) that best distinguish the cases in which the system does or does not meet its objectives. The most widely known classification algorithms are the Patient Rule Induction Method (PRIM; [97]) and Classification and Regression Trees (CART; [98]). These factor mapping algorithms create orthogonal boundaries (multi-dimensional hypercubes) between states of the world that are successful or unsuccessful in meeting the system’s goals [65]. The algorithms attempt to strike a balance between simplicity of classification (and as a result, interpretability) and accuracy [45, 103, 148].
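Below is a sketch of CART-style factor mapping with scikit-learn: a shallow decision tree is fit to success/failure labels from an exploratory ensemble, and its splits form the orthogonal boundaries described above. The sampled factors and labels here are placeholders for a real ensemble.

```python
import numpy as np
from sklearn.tree import DecisionTreeClassifier, export_text

rng = np.random.default_rng(0)
X = rng.uniform([0.1, 2.0], [0.45, 4.5], size=(2000, 2))     # sampled (b, q) states of the world
success = (X[:, 0] - 0.08 * X[:, 1] > 0.12).astype(int)      # placeholder success/failure labels

tree = DecisionTreeClassifier(max_depth=3)                    # a shallow tree keeps the rules interpretable
tree.fit(X, success)

# The printed splits are the orthogonal boundaries of the discovered scenarios
print(export_text(tree, feature_names=["b", "q"]))
```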

+

Even though these approaches have been shown to yield interpretable and relevant scenarios [149], several authors have pointed out the limitations of these methods with regards to their division of space in orthogonal behavioral and non-behavioral regions [150]. Due to their reliance on boundaries orthogonal to the uncertainty axes, PRIM and CART cannot capture interactions between the various uncertain factors considered, which can often be significant [151]. More advanced methods have been proposed to address this drawback, with logistic regression being perhaps the most prominent [151, 152, 153]. Logistic regression can produce boundaries that are not necessarily orthogonal to each uncertainty axis, nor necessarily linear, if interactive terms between two parameters are used to build the regression model. It also describes the probability that a state of the world belongs to the scenarios that lead to failure. This feature allows users to define regions of success based on a gradient of estimated probability of success in those worlds, unlike PRIM which only classifies states of the world in two regions [151, 154].
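A corresponding sketch of the logistic regression variant, with an explicitly pre-specified interaction term so that the boundary need not be orthogonal to the axes; predict_proba provides the gradient of estimated success probability mentioned above. The sampled factors and labels are again placeholders.

```python
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import PolynomialFeatures

rng = np.random.default_rng(1)
X = rng.uniform([0.1, 2.0], [0.45, 4.5], size=(2000, 2))     # sampled (b, q) states of the world
success = (X[:, 0] * X[:, 1] > 0.9).astype(int)              # placeholder labels with an interaction

# interaction_only=True adds the pre-specified b*q term to the regression model
features = PolynomialFeatures(degree=2, interaction_only=True, include_bias=False)
Xi = features.fit_transform(X)

clf = LogisticRegression(max_iter=1000).fit(Xi, success)
prob_success = clf.predict_proba(Xi)[:, 1]                    # probability gradient across states of the world
```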

+

Another more advanced factor mapping method is boosted trees [99, 155]. Boosted trees can avoid two limitations inherent to the application of logistic regression: i) to build a nonlinear classification model, the interactive term between two uncertainties needs to be pre-specified and cannot be discovered (e.g., we need to know a priori whether factor \(x_1\) interacts with \(x_2\) in a relationship that looks like \(x_1\)·\(x_2\) or \(x_1^{x_2}\)); and ii) the subspaces defined are always convex. The application of such a factor mapping algorithm is limited in the presence of threshold-based rules with discrete actions in a modeled system (e.g., “if network capacity is low, build new infrastructure”), which results in failure regions that are nonlinear and non-convex [150]. Boosting works by creating an ensemble of classifiers and forcing some of them to focus on the hard-to-learn parts of the problem, and others to focus on the easy-to-learn parts. Boosting applied to CART trees can avoid the aforementioned challenges faced by other scenario discovery methods, while resisting overfitting [156], ensuring the identified success and failure regions are still easy to interpret.
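And a brief sketch of the boosted-trees variant, which can discover interactions and non-convex regions without pre-specifying them; scikit-learn's GradientBoostingClassifier stands in here for the boosting approaches cited, and the labels mimic a threshold-based rule.

```python
import numpy as np
from sklearn.ensemble import GradientBoostingClassifier

rng = np.random.default_rng(2)
X = rng.uniform([0.1, 2.0, 0.01], [0.45, 4.5, 0.05], size=(2000, 3))   # (b, q, mu) states of the world
# Placeholder non-convex failure region, e.g. produced by a threshold-based rule in the model
success = ((X[:, 0] > 0.3) | (X[:, 0] * X[:, 1] > 1.1)).astype(int)

boosted = GradientBoostingClassifier(n_estimators=200, max_depth=3).fit(X, success)
importance = boosted.feature_importances_   # which factors most control the discovered scenarios
```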

+

Below we provide an example application of two scenario discovery methods, PRIM and logistic regression, using the lake problem introduced in the previous section. From the sensitivity analysis results presented in Fig. 4.3 (d), we can already infer that parameters \(b\) and \(q\) have important effects on model outputs (i.e., we have performed factor prioritization). Scenario discovery (i.e., factor mapping) complements this analysis by further identifying the specific values of \(b\) and \(q\) that can lead to consequential and undesirable outcomes. For the purposes of demonstration, we assume the undesirable outcome in this case is defined as the management policy failing to achieve 90% reliability in a state of the world.

+
Figure 4.5
+

Scenario discovery for the lake problem, using (a) PRIM and (b) logistic regression.#

+
+
+

Fig. 4.5 shows the results of scenario discovery, performed through (a) PRIM and (b) logistic regression. Each point in the two panels indicates a potential state of the world, generated through Latin Hypercube Sampling. Each point is colored by whether the policy meets the above performance criterion, with blue indicating success and red indicating failure. PRIM identifies several orthogonal areas of interest, one of which is shown in panel (a). As discussed above, this necessary orthogonality limits how PRIM identifies areas of success (the area within the box). As factors \(b\) and \(q\) interact in this system, the transition boundary between the regions of success and failure is not orthogonal to any of the axes. As a result, a large number of points in the bottom right and the top left of the figure are left outside of the identified region. Logistic regression can overcome this limitation by identifying a diagonal boundary between the two regions, seen in panel (b). This method also produces a gradient of estimated probability of success across these regions.

+
+

Note

+

Put this into practice! Click the following link to try out an interactive tutorial on performing factor mapping using logistic regression: Logistic Regression Jupyter Notebook

+
+
\ No newline at end of file
diff --git a/dev/docs/html/4_sensitivity_analysis_diagnostic_and_exploratory_modeling.html b/dev/docs/html/4_sensitivity_analysis_diagnostic_and_exploratory_modeling.html
new file mode 100644
index 0000000..fd3d0c8
--- /dev/null
+++ b/dev/docs/html/4_sensitivity_analysis_diagnostic_and_exploratory_modeling.html

4. Sensitivity Analysis: Diagnostic & Exploratory Modeling#

+
+

4.1. Understanding Errors: What Is Controlling Model Performance?#

+

Sensitivity analysis is a diagnostic tool for reconciling model outputs with observed data. It is helpful for clarifying how and under what conditions modeling choices (structure, parameterization, data inputs, etc.) propagate through model components and manifest in their effects on model outputs. This exploration is performed through carefully designed sampling of multiple combinations of input parameters and subsequent evaluation of the model structures that emerge as controlling factors. Model structure and parameterization are two of the most commonly explored aspects of models and have been a central focus when evaluating their performance relative to available observations [17]. Addressing these issues plays an important role in establishing credibility in model predictions, particularly in the positivist natural sciences literature. Traditional model evaluations compare the model with observed data and then rely on expert judgements of its acceptability based on the closeness between simulation and observation for one or a small number of selected metrics. This approach can be myopic, as it is often impossible to use one metric to attribute a certain error and to link it with different parts of the model and its parameters [127]. This means that, even when the error or fitting measure between the model estimates and observations is very small, it is not guaranteed that all the components in the model accurately represent the conceptual reality that the model is abstracting: propagated errors in different parts of the model might cancel each other out, or multiple parameterized implementations of the model can yield similar performance (i.e., equifinality [17]).

+

The inherent complexity of a system hinders accepting or rejecting a model based on one performance measure, and different types of measures can aid in evaluating the various components of a model as essentially a multiobjective problem [25, 26]. In addition, the natural systems mimicked by models contain various interacting components that might act differently across spatial and temporal domains [51, 55]. This heterogeneity is lost when a single performance measure is used, as a high-dimensional and interactive system becomes aggregated through the averaging of spatial or temporal output errors [15]. Therefore, diagnostic error analyses should consider multiple error signatures across different scales and states of concern when seeking to understand how model performance relates to observed data (Fig. 4.1). Diverse error signatures can be used to measure the consistency of underlying processes and behaviors of the model and to evaluate the dynamics of model controls under changing temporal and spatial conditions [128]. Within this framework, even minimal information extracted from the data can be beneficial, as it helps us unearth structural inadequacies in the model. In this context, the proper selection of measures of model performance, and the number of measures used, can play consequential roles in our understanding of the model and its predictions [129].

+

As discussed earlier, instead of the traditional focus on a deterministic prediction that results in a single error measure, many plausible states and spaces can be searched to make different inferences and quantify uncertainties. This process also requires estimates of prior probability distributions for all the important parameters and quantification of model behavior across the input space. One strategy to reduce the search space is to filter out model alternatives that are not consistent with observations and known system behaviors. Those implausible parts of the search space can be referred to as non-physical or non-behavioral alternatives [50, 104]. This step is conducted before the Bayesian calibration exercise (see Chapter A).

+

A comprehensive model diagnostic workflow typically entails the components demonstrated in Fig. 4.1. The workflow begins with the selection of model input parameters and their plausible ranges. After the parameter selection, we need to specify the design of experiments (Chapter 3.3) and the sensitivity analysis method (Chapter 3.4) to be used. As previously discussed, these methods require different numbers of model simulations, and each method provides different insights into the direct effects and interactions of the uncertain factors. In addition, the simulation time of the model and the available computational resources are two of the primary considerations that influence these decisions. After identifying the appropriate methods, we generate a matrix of input parameters, where each set of input parameters will be used to conduct a model simulation. The model can include one or more output variables that fluctuate in time and space. The next step is to analyze model performance by comparing model outputs with observations. As discussed earlier, the positivist model evaluation paradigm focuses on a single model performance metric (error), leading to a loss of information about model parameters and the suitability of the model’s structure. However, a thorough investigation of the temporal and spatial signatures of model outputs using various performance metrics or time- and space-varying sensitivity analyses can shed more light on the fitness of each parameter set and the model’s internal structure. This analysis provides diagnostic feedback on the importance and range of model parameters and can guide further improvement of the model algorithm.

+
Figure 4.1
+

Fig. 4.1 Diagnostic evaluation of model fidelity using sensitivity analysis methods.#

+
+
+
+

Note

+

Put this into practice! Click the following badge to try out an interactive tutorial on implementing a time-varying sensitivity analysis of HYMOD model parameters: HYMOD Jupyter Notebook

+
+
+
+

4.2. Consequential Dynamics: What is Controlling Model Behaviors of Interest?#

+

Consequential changes in dynamic systems can take many forms, but most dynamic behavior can be categorized into a few basic patterns. Feedback structures inherent in a system, be they positive or negative, generate these patterns, which for the simplest systems fall into three groups: exponential growth, goal seeking, and oscillation (Fig. 4.2). A positive (or self-reinforcing) feedback gives rise to exponential growth, a negative (or self-correcting) feedback gives rise to a goal-seeking mode, and negative feedbacks with time delays give rise to oscillatory behavior. Nonlinear interactions between the system’s feedback structures can give rise to more complex dynamic behavior modes, examples of which are also shown in Fig. 4.2, adapted from Sterman [130].

+
Figure 4.2
+

Fig. 4.2 Common modes of behavior in dynamic systems, occurring based on the presence of positive and negative feedback relationships, and linear and non-linear interactions. Adapted from Sterman [130].#

+
+
+

The nature of feedback processes in a dynamic system shapes its fundamental behavior: positive feedbacks generate their own growth, while negative feedbacks self-limit, seeking balance and equilibrium. In this manner, feedback processes give rise to different regimes, several of which may be present within each mode of behavior. Consider a population of mammals growing exponentially until it reaches the carrying capacity of its environment (referred to as S-shaped growth). When the population is growing exponentially, the regime is dominated by positive feedback relationships that reinforce its growth. As the population approaches its carrying capacity limit, negative feedback structures begin to dominate, counteracting the growth and establishing a stable equilibrium. Shifts between regimes can be thought of as tipping points, mathematically defined as unstable equilibria, where the presence of positive feedbacks amplifies disturbances and moves the system to a new equilibrium point. In the case of stable equilibria, the presence of negative feedbacks dampens any small disturbance and maintains the system in a stable state. As different feedback relationships govern each regime, different factors (those making up each feedback mechanism) are activated and shape the states the system is found in, as well as define the points of equilibria.

+

For simple stylized models with a small number of states, system dynamics analysis can analytically derive these equilibria, the conditions for their stability, and the factors determining them. Performing this analysis, however, becomes significantly more challenging for systems that attempt to more closely resemble real complex systems. We argue this is the case for several reasons. First, besides generally exhibiting complex nonlinear dynamics, real-world systems are also made up of larger numbers of interacting elements, which often makes the analytic derivation of system characteristics intractable [131, 132]. Second, human-natural systems temporally evolve and transform when human state-aware action is present. Consider, for instance, humans recreationally hunting the aforementioned population of mammals. Humans act based on the mammal population levels by enforcing hunting quotas, establishing protected territories, or eliminating other predators. The mammal population reacts in response, giving rise to ever-changing state-action-consequence feedbacks, the path dependencies of which become difficult to diagnose and understand (e.g., [133]). Trying to simulate the combination of these two challenges (large numbers of state-aware agents interacting with a natural resource and with each other) produces intractable models that require advanced heuristics to analyze their properties and establish useful inferences.

+

Sensitivity analysis paired with exploratory modeling methods offers a promising set of tools to address these challenges. We present a simple demonstrative application based on Quinn et al. [134]. This stylized example was first developed by Carpenter et al. [135] and represents a town that must balance its agricultural and industrial productivity with the pollution it creates in a downstream lake. Increased productivity allows for increased profits, which the town aims to maximize, but it also produces more pollution for the lake. Too much phosphorus pollution can cause irreversible eutrophication, a process known as “tipping” the lake. The model of phosphorus in the lake \(X_t\) at time \(t\) is governed by:

+
+\[X_{t+1}= X_{t}+a_{t}+\frac{X_{t}^q} {1+X_{t}^q}-bX_t+\varepsilon\]
+

where \(a_t \in [0,0.1]\) is the town’s pollution release at each timestep, \(b\) is the natural decay rate of phosphorus in the lake, \(q\) defines the lake’s recycling rate (primarily through sediments), and \(\varepsilon\) represents uncontrollable natural inflows of pollution modeled as a log-normal distribution with a given mean, \(\mu\), and standard deviation \(\sigma\).

+

Panels (a-c) in Fig. 4.3 plot the fluxes of phosphorus into the lake versus the mass accumulation of phosphorus in the lake. The red line corresponds to the phosphorus sinks in the lake (natural decay), given by \(bX_t\). The grey shaded area represents the lake’s phosphorus recycling flux, given by \(\frac{X_{t}^q} {1+X_{t}^q}\). The points of intersection indicate the system’s equilibria, two of which are stable, and one is unstable (also known as the tipping point). The stable equilibrium in the bottom left of the figure reflects an oligotrophic lake, whereas the stable equilibrium in the top right represents a eutrophic lake. With increasing phosphorus values, the tipping point can be crossed, and the lake will experience irreversible eutrophication, as the recycling rate would exceed the removal rate even if the town’s pollution became zero. In the absence of anthropogenic and natural inflows of pollution in the lake (\(a_t\) and \(\varepsilon\) respectively), the area between the bottom-left black point and the white point in the middle can be considered as the safe operating space, before emission levels cross the tipping point.

+
Figure 4.3
+

Fig. 4.3 Fluxes of phosphorus with regard to the mass of phosphorus in the lake and sensitivity analysis results, assuming \(b=0.42\) and \(q=2\). (a) Fluxes of phosphorus assuming no emissions policy and no natural inflows. (b-c) Fluxes of phosphorus when applying two different emissions policies. The “Best economic policy” and the “Most reliable policy” have been identified by Quinn et al. [134] and can be found at Quinn [136]. (d) Results of a sensitivity analysis on the parameters of the model most consequential to the reliability of the “Most reliable policy”. The code to replicate the sensitivity analysis can be found at Hadka [137]. Panels (a-c) are used courtesy of Julianne Quinn, University of Virginia.#

+
+
+

The town has identified two potential policies that can be used to manage this lake, one that maximizes its economic profits (“best economic policy”) and one that maximizes the time below the tipping point (“most reliable policy”). Panels (b-c) in Fig. 4.3 add the emissions from these policies to the recycling flux and show how the equilibria points shift as a result. In both cases the stable oligotrophic equilibrium increases and the tipping point decreases, narrowing the safe operating space [131, 138]. The best economic policy results in a much narrower space of action, with the tipping point very close to the oligotrophic equilibrium. The performance of both policies depends significantly on the system parameters. For example, a higher value of \(b\), the natural decay rate, would shift the red line upward, moving the equilibria points and widening the safe operating space. Inversely, a higher value of \(q\), the lake’s recycling rate, would shift the recycling line upward, moving the tipping point lower and decreasing the safe operating space. The assumptions under which these policies were identified are therefore critical to their performance and any potential uncertainty in the parameter values could be detrimental to the system’s objectives being met.

+

Sensitivity analysis can be used to clarify the role these parameters play in policy performance. Fig. 4.3 (d) shows the results of a Sobol sensitivity analysis on the reliability of the “most reliable” policy in a radial convergence diagram. The significance of each parameter is indicated by the size of the circles corresponding to it. The size of the interior dark circle indicates the parameter’s first-order effects and the size of the exterior circle indicates the parameter’s total-order effects. The thickness of the line between two parameters indicates the extent of their interaction (second-order effects). In this case, parameters \(b\) and \(q\) appear to have the most significant effects on the system, followed by the mean, \(\mu\), of the natural inflows. All these parameters function in a manner that shifts the location of the three equilibria, and therefore policies that are identified ignoring this parametric uncertainty might fail to meet their intended goals.

+

It is worth mentioning that current sensitivity analysis methods are somewhat challenged in addressing several system dynamics analysis questions. The fundamental reason is that sensitivity analysis methods and tools have been developed to gauge numerical sensitivity of model output to changes in factor values. This is natural, as most simulation studies (e.g., all aforementioned examples) have been traditionally concerned with this type of sensitivity. In system dynamics modeling, however, a more important and pertinent concern is changes between regimes or between behavior modes (also known as bifurcations) as a result of changes in model factors [130, 139]. This poses two new challenges. First, identifying a change in regime depends on several characteristics besides a change in output value, like the rate and direction of change. Second, behavior mode changes are qualitative and discontinuous, as equilibria change in stability but also move in and out of existence.

+

Despite these challenges, recent advanced sensitivity analysis methods can help illuminate which factors in a system are most important in shaping boundary conditions (tipping points) between different regimes and determining changes in behavior modes. Reviewing such methods is outside the scope of this text, but the reader is directed to the examples of Eker et al. [22] and Hadjimichael et al. [133], who apply parameterized perturbation to the functional relationships of a system to study the effects of model structural uncertainty on model outputs and bifurcations, and Hekimoğlu and Barlas [139] and Steinmann et al. [140] who, following wide sampling of uncertain inputs, cluster the resulting time series into modes of behavior and identify the most important factors for each.

+
+

Note

+

Put this into practice! Click the following badge to try out an interactive tutorial on performing a sensitivity analysis to discover consequential dynamics: Factor Discovery Jupyter Notebook

+
+
+
+

4.3. Consequential Scenarios: What is Controlling Consequential Outcomes?#

+

As overviewed in Chapter 2.2, most models are abstractions of systems in the real world. When sufficient confidence has been established in a model, it can then act as a surrogate for the actual system, in that the consequences of potential stressors, proposed actions, or other changes can be evaluated by computer model simulations [36]. A model simulation then represents a computational experiment, which can be used to assess how the modeled system would behave should the various changes come to be. Steven Bankes coined the term exploratory modeling to describe the use of large sets of such computational experiments to investigate their implications for the system. Fig. 4.4 presents a typical workflow of an exploratory modeling application. Exploratory modeling approaches typically use sampling designs to generate large ensembles of states that represent combinations of changes happening together, spanning the entire range of potential values a factor might take (indicated in Fig. 4.4 by numbers 2-5). This perspective on modeling is particularly relevant to studies making long-term projections into the future.

+
Figure 4.4
+

Fig. 4.4 A typical exploratory modeling workflow#

+
+
+

In the long-term policy analysis literature, exploratory modeling has prominently placed itself as an alternative to traditional narrative scenario or assumptions-based planning approaches, in what can be summarized as the following two-pronged critique [36, 141, 142]. The most prevalent criticism is that the future and how it might evolve are both highly complex and deeply uncertain. Despite its benefits for interpretation and intuitive appeal, a small number of scenarios invariably misses many other potential futures that were not selected as sufficiently representative. This is especially the case for aggregate, narrative scenarios that describe simultaneous changes in multiple sectors together (e.g., “increased energy demand, combined with high agricultural land use and large economic growth”), such as the emission scenarios produced by the Intergovernmental Panel on Climate Change [143]. The bias introduced by this reduced set of potential changes can skew inferences drawn from the model, particularly when the original narrative scenarios are focused on a single or narrow set of measures of system behavior.

+

The second main criticism of traditional narrative scenario-based planning methods is that they provide no systematic way to distinguish which of the constituent factors lead to the undesirable consequences produced by a scenario. Narrative scenarios (e.g., the scenario matrix framework of RCPs-SSPs-SPAs; [144]) encompass multiple changes happening together, selected to span the range of potential changes, but are not typically generated in a systematic factorial manner that considers the multiple ways the factors can be combined. This has two critical limitations. It obfuscates the role each component factor plays in the system, both in isolation and in combination with others (e.g., “is it the increased energy demand or the high agricultural land use that causes unbearable water stress?”). It also renders it nearly impossible to delineate how much change in a factor is critical. Consider, for example, narrative scenario A with a 5% increase in energy demand, and scenario B with a 30% increase in energy demand, which would have dire consequences. At which point between 5% and 30% do the dire consequences actually begin to occur? Such questions cannot be answered without a wide exploration of the space of potential changes. It should be noted that for some levels of model complexity and computational demands (e.g., global-scale models) there is little feasible recourse beyond the use of narrative scenarios.

+

Exploratory modeling is typically paired with scenario discovery methods (indicated by number 9 in Fig. 4.4) that identify which of the scenarios (also known as states of the world) generated indeed have consequences of interest for stakeholders and policy makers, in an approach referred to as ensemble-based scenario-discovery [45, 102, 103]. This approach therefore flips the planning analysis from one that attempts to predict future system conditions to one that attempts to discover the (un)desirable future conditions. Ensemble-based scenario discovery can thus inform what modeling choices yield the most consequential behavioral changes or outcomes, especially when considering deeply uncertain, scenario-informed projections [9, 145]. The relative likelihoods and relevance of the discovered scenarios can be subsequently evaluated by the practitioners a posteriori, within a richer context of knowing the wider set of potential consequences [146]. This can include changing how an analysis is framed (number 10 in Fig. 4.4). For instance, one could initially focus on ensemble modeling of vulnerability using a single uncertain factor that is assumed to be well characterized by historical observations (e.g., streamflow; this step is represented by numbers 2-3 in Fig. 4.4). The analysis can then shift to include projections of more factors treated as deeply uncertain (e.g., urbanization, population demands, temperature, and snow-melt) to yield a far wider space of challenging projected futures. UC experiments contrasting these two framings can be highly valuable for tracing how vulnerability inferences change as the modeled space of futures expands from the historical baseline [134].

+

An important nuance to clarify here is that the focus or purpose of a modeling exercise plays a major role in whether a given factor of interest is considered well-characterized or deeply uncertain. Take the example of characterizing temperature or streamflow extremes, where, for each state variable of interest at a given location, there is a century of historical observations. The observation technologies will have evolved over time, differently for temperature and for streamflow measurements, and the records likely lack replicate experiments (data uncertainty). A century of record will also be insufficient to capture very-high-impact, rare extreme events (i.e., increasingly poor structural/parametric inference for the distributions of specific extreme single or compound events). The mechanistic processes, as well as their evolving variability, will be interdependent but uniquely different for each of these state variables. A large body of statistical literature focuses on synthetic weather [81, 82] or streamflow [83, 84] generation and provides a rich suite of approaches for developing history-informed, well-characterized stochastic process models to better estimate rare individual or compound extremes. These history-focused approaches can be viewed as providing well-characterized quantifications of streamflow or temperature distributions; however, they do not capture how coupled natural-human processes can fundamentally change these dynamics when transitioning to projections of longer-term futures (e.g., streamflow and temperature in 2055). Consequently, shifting the focus of the modeling to long-term projections of future streamflow or temperature makes these same processes deeply uncertain.
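To make the notion of a history-informed, well-characterized stochastic process model concrete, the sketch below fits a simple lag-1 autoregressive model to log-transformed annual flows and simulates a synthetic ensemble. It is a minimal illustration under strong assumptions: the observed record is a synthetic stand-in, and real generators would also preserve seasonality, cross-site correlation, and other statistics emphasized in the literature cited above.

```python
import numpy as np

rng = np.random.default_rng(0)
obs = np.exp(rng.normal(3.0, 0.5, size=100))     # stand-in for ~100 years of annual flows

log_q = np.log(obs)
mu, sigma = log_q.mean(), log_q.std(ddof=1)
rho = np.corrcoef(log_q[:-1], log_q[1:])[0, 1]   # lag-1 autocorrelation of log flows

def synthetic_trace(n_years, rng):
    """Simulate one synthetic trace from the fitted AR(1) model in log space."""
    z = np.empty(n_years)
    z[0] = rng.normal(mu, sigma)
    for t in range(1, n_years):
        z[t] = rng.normal(mu + rho * (z[t - 1] - mu), sigma * np.sqrt(1.0 - rho**2))
    return np.exp(z)

# A 1,000-member synthetic ensemble, each trace the same length as the observed record
ensemble = np.array([synthetic_trace(obs.size, rng) for _ in range(1000)])
```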

+

Scenario discovery methods (number 9 in Fig. 4.4) can be qualitative or quantitative, and they generally attempt to distinguish futures in which a system, or the proposed policies to manage it, meet or miss their goals [103]. The emphasis placed by exploratory modeling on model outputs with decision-relevant consequences represents a shift toward a broader class of metrics that reflect stakeholders’ concerns, agency, and preferences (also discussed in Chapter 2.2). As a result, sensitivity analysis and scenario discovery methods in this context are applied to performance metrics that go beyond model error and instead focus on broader measures such as the resilience of a sector, the reliability of a process, or the vulnerability of a population in the face of uncertainty. In the exploratory modeling literature, this metric is most typically, but not always, a measure of robustness (number 8 in Fig. 4.4). Robustness is a property of a system or a design choice that captures its insensitivity to uncertainty, and it can be measured in a variety of ways, most recently reviewed by Herman et al. [147] and McPhail et al. [129].
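As a simple illustration of one such robustness measure, the sketch below computes the fraction of sampled states of the world in which each candidate policy satisfies a performance criterion (sometimes called domain-criterion satisfaction). The reliability values and the 90% threshold are synthetic stand-ins, not outputs of any model discussed in this text.

```python
import numpy as np

rng = np.random.default_rng(3)
# Hypothetical reliability of 3 candidate policies evaluated in 10,000 states of the world
reliability = rng.uniform(0.7, 1.0, size=(3, 10_000))

# Robustness as the fraction of states of the world meeting a 90% reliability criterion
robustness = (reliability >= 0.9).mean(axis=1)
print(robustness)   # one robustness score per policy
```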

+

Scenario discovery is typically performed by applying algorithms to large databases of model runs generated through exploratory modeling, with each model run representing the performance of the system in one potential state of the world. The algorithms seek to identify the combinations of factor values (e.g., future conditions) that best distinguish the cases in which the system does or does not meet its objectives. The most widely known classification algorithms are the Patient Rule Induction Method (PRIM; [97]) and Classification and Regression Trees (CART; [98]). These factor mapping algorithms create orthogonal boundaries (multi-dimensional hypercubes) between states of the world that are successful or unsuccessful in meeting the system’s goals [65]. The algorithms attempt to strike a balance between simplicity of classification (and, as a result, interpretability) and accuracy [45, 103, 148].
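A minimal sketch of CART-style factor mapping, assuming scikit-learn and synthetic stand-in data (the two factors and the success rule are hypothetical): a shallow decision tree is fit to labeled states of the world and its orthogonal splitting rules are printed. PRIM implementations are available in packages such as the ema_workbench, but are not shown here.

```python
import numpy as np
from sklearn.tree import DecisionTreeClassifier, export_text

rng = np.random.default_rng(42)
X = rng.uniform(0.0, 1.0, size=(2000, 2))                 # sampled states of the world
success = (X[:, 0] + 0.5 * X[:, 1] > 0.9).astype(int)     # hypothetical success rule

# A shallow tree keeps the orthogonal classification rules simple and interpretable
tree = DecisionTreeClassifier(max_depth=2).fit(X, success)
print(export_text(tree, feature_names=["factor_1", "factor_2"]))
```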

+

Even though these approaches have been shown to yield interpretable and relevant scenarios [149], several authors have pointed out their limitations with regard to dividing the space into orthogonal behavioral and non-behavioral regions [150]. Because they rely on boundaries orthogonal to the uncertainty axes, PRIM and CART cannot capture interactions between the various uncertain factors considered, which can often be significant [151]. More advanced methods have been proposed to address this drawback, with logistic regression being perhaps the most prominent [151, 152, 153]. Logistic regression can produce boundaries that are not necessarily orthogonal to each uncertainty axis, nor necessarily linear, if interaction terms between parameters are used to build the regression model. It also describes the probability that a state of the world belongs to the scenarios that lead to failure. This feature allows users to define regions of success based on a gradient of estimated probability of success in those worlds, unlike PRIM, which only classifies states of the world into two regions [151, 154].
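A minimal sketch of this idea, assuming scikit-learn and synthetic stand-in data: an interaction column is added before fitting, so the estimated failure boundary need not be orthogonal (or linear) in the original factors, and predict_proba returns a gradient of failure probability rather than a hard classification.

```python
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import PolynomialFeatures

rng = np.random.default_rng(1)
X = rng.uniform(0.0, 1.0, size=(2000, 2))               # sampled states of the world
failure = (X[:, 0] * X[:, 1] > 0.25).astype(int)        # hypothetical interactive failure rule

# Append the x1*x2 interaction column before fitting the classifier
expand = PolynomialFeatures(degree=2, interaction_only=True, include_bias=False)
clf = LogisticRegression().fit(expand.fit_transform(X), failure)

# Estimated probability of failure for a new state of the world
new_sow = expand.transform([[0.6, 0.5]])
print(clf.predict_proba(new_sow)[0, 1])
```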

+

Another, more advanced factor mapping method is boosted trees [99, 155]. Boosted trees can avoid two limitations inherent to the application of logistic regression: i) to build a nonlinear classification model, the interaction term between two uncertainties needs to be pre-specified and cannot be discovered (e.g., we need to know a priori whether factor \(x_1\) interacts with \(x_2\) in a relationship that looks like \(x_1\)·\(x_2\) or \(x_1^{x_2}\)); and ii) the subspaces defined are always convex. This limits the application of such a factor mapping algorithm in the presence of threshold-based rules with discrete actions in a modeled system (e.g., “if network capacity is low, build new infrastructure”), which result in failure regions that are nonlinear and non-convex [150]. Boosting works by creating an ensemble of classifiers and forcing some of them to focus on the hard-to-learn parts of the problem and others on the easy-to-learn parts. Boosting applied to CART trees can avoid the aforementioned challenges faced by other scenario discovery methods while resisting overfitting [156], ensuring that the identified success and failure regions remain easy to interpret.
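The sketch below illustrates the same factor mapping task with boosted trees, using scikit-learn's GradientBoostingClassifier on a synthetic, non-convex failure region (a stand-in for the threshold-based rules mentioned above); it is not the specific boosting implementation used in the cited studies.

```python
import numpy as np
from sklearn.ensemble import GradientBoostingClassifier

rng = np.random.default_rng(7)
X = rng.uniform(0.0, 1.0, size=(5000, 2))               # sampled states of the world

# Hypothetical non-convex failure region: two disjoint corners of the factor space
failure = (((X[:, 0] > 0.8) & (X[:, 1] < 0.3)) |
           ((X[:, 0] < 0.2) & (X[:, 1] > 0.7))).astype(int)

gbc = GradientBoostingClassifier(n_estimators=200, max_depth=3).fit(X, failure)
print(gbc.score(X, failure))   # in-sample accuracy; use held-out samples in practice
```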

+

Below we provide an example application of two scenario discovery methods, PRIM and logistic regression, using the lake problem introduced in the previous section. From the sensitivity analysis results presented in Fig. 4.3 (d), we can already infer that parameters \(b\) and \(q\) have important effects on model outputs (i.e., we have performed factor prioritization). Scenario discovery (i.e., factor mapping) complements this analysis by further identifying the specific values of \(b\) and \(q\) that can lead to consequential and undesirable outcomes. For the purposes of demonstration, we can assume the undesirable outcome in this case is defined as the management policy failing to achieve 90% reliability in a state of the world.

+
+

Fig. 4.5 Scenario discovery for the lake problem, using (a) PRIM and (b) logistic regression.#

+
+
+

Fig. 4.5 shows the results of scenario discovery, performed through (a) PRIM and (b) logistic regression. Each point in the two panels indicates a potential state of the world, generated through Latin Hypercube Sampling. Each point is colored by whether the policy meets the above performance criterion, with blue indicating success and red indicating failure. PRIM identifies several orthogonal areas of interest, one of which is shown in panel (a). As discussed above, this necessary orthogonality limits how PRIM identifies areas of success (the area within the box). As factors \(b\) and \(q\) interact in this system, the transition boundary between the regions of success and failure is not orthogonal to any of the axes. As a result, a large number of points in the bottom right and the top left of the figure are left outside of the identified region. Logistic regression can overcome this limitation by identifying a diagonal boundary between the two regions, seen in panel (b). This method also produces a gradient of estimated probability of success across these regions.

+
+

Note

+

Put this into practice! Click the following link to try out an interactive tutorial on performing factor mapping using logistic regression: Logistic Regression Jupyter Notebook

+
+
+

Note

+

The following articles are suggested as fundamental reading for the information presented in this section:

+
    +
  • Gupta, H.V., Wagener, T., Liu, Y., 2008. Reconciling theory with observations: elements of a diagnostic approach to model evaluation. Hydrological Processes: An International Journal 22, 3802–3813.

  • Bankes, S., 1993. Exploratory Modeling for Policy Analysis. Operations Research 41, 435–449. https://doi.org/10.1287/opre.41.3.435

  • Groves, D.G., Lempert, R.J., 2007. A new analytic method for finding policy-relevant scenarios. Global Environmental Change 17, 73–85. https://doi.org/10.1016/j.gloenvcha.2006.11.006

+

The following articles can be used as supplemental reading:

+ + \ No newline at end of file diff --git a/dev/docs/html/5_conclusion.html b/dev/docs/html/5_conclusion.html new file mode 100644 index 0000000..f0f0390 --- /dev/null +++ b/dev/docs/html/5_conclusion.html @@ -0,0 +1,472 @@ + + + + + + + + + + + 5. Conclusion — Addressing Uncertainty in MultiSector Dynamics Research documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

5. Conclusion#

+

As noted in the Introduction (Chapter 1), the computational and conceptual challenges of the multi-model, transdisciplinary workflows that characterize ambitious projects such as IM3 have limited UC and UQ analyses. Moreover, the very nature and purpose of modeling and diagnostic model evaluation can have very diverse philosophical framings depending on the disciplines involved (see Fig. 1.1 and Chapter 2.2). The guidance provided in this text can be used to frame consistent and rigorous experimental designs for better understanding the consequences of, and insights from, our modeling choices when seeking to capture complex human-natural systems. The progression of sections in this text provides a thorough introduction to the concepts and definitions of diagnostic model evaluation, sensitivity analysis, and UC. In addition, we comprehensively discuss how specific modeling objectives and applications should guide the selection of appropriate techniques; broadly, these can include model diagnostics, in-depth analysis of the behavior of the abstracted system, and projections under conditions of deep uncertainty. This text also contains a detailed presentation of the main sensitivity analysis methods and a discussion of their features and main limitations. Readers are also provided with an overview of computer tools and platforms that have been developed and could be considered in addressing IM3 scientific questions. The appendices of this text include an overview of UQ methods, a glossary of key concepts, and example test cases and scripts that showcase various UC-related capabilities.

+

Although we distinguish between UC and UQ model diagnostics, the reader should note that we suggest an overall consistent approach to both in this text by emphasizing “exploratory modeling” (see the review by Moallemi et al. [9]). Although data support, model complexity, and computational limits strongly shape the feasibility and appropriateness of various UC diagnostic tools (e.g., see Fig. 3.5), we recommend that modelers view their work through the lens of cycles of learning. Iterative and deliberative exploration of model-based hypotheses and inferences is non-trivial for transdisciplinary teams, and it is ultimately critical for mapping where innovations or insights are most consequential. Overall, we recommend approaching modeling with openness to diverse disciplinary perspectives, such as those mirrored by the IM3 family of models, in a progression from evaluating models relative to observed history to advanced, formalized analyses that make inferences about multi-sector, multi-scale vulnerabilities and resilience. Exploratory modeling approaches can help fashion experiments with large numbers of alternative hypotheses on the co-evolutionary dynamics of influences and stressors, as well as path-dependent changes in the form and function of coupled human-natural systems [37]. This text guides the reader through the use of sensitivity analysis and uncertainty methods across the diverse perspectives that have shaped modern diagnostic and exploratory modeling.

+
+ + \ No newline at end of file diff --git a/dev/docs/html/6_glossary.html b/dev/docs/html/6_glossary.html new file mode 100644 index 0000000..3f8ee60 --- /dev/null +++ b/dev/docs/html/6_glossary.html @@ -0,0 +1,499 @@ + + + + + + + + + + + Glossary — Addressing Uncertainty in MultiSector Dynamics Research documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

Glossary#

+

Design of experiment: Provides a framework for the extraction of all plausible information about the impact of each factor on the output of the numerical model

+

Exploratory modeling: Use of large ensembles of uncertain conditions to discover decision-relevant combinations of uncertain factors

+

Factor: Any model component that can affect model outputs: inputs, resolution levels, coupling relationships, model relationships and parameters. In models with acceptable model fidelity these factors may represent elements of the real-world system under study.

+

Factor mapping: A technique to identify which uncertain model factors lead to certain model behavior

+

Factor prioritization: A technique to identify the uncertain factors which, when fixed to their true value, would lead to the greatest reduction in output variability

+

Factor screening: A technique to identify model components that have a negligible effect or make no significant contributions to the variability of the outputs or metrics of interest

+

First-, second-, total-order effects: First-order effects indicate the percent of model output variance contributed by a factor individually. Second-order effects capture how interactions between a pair of parameter input variables can lead to change in model output. Total-order effects consider all the effects a factor has, individually and in interaction with other factors.

+

Hindcasting: A type of predictive check that uses the model to estimate output for past events to see how well the output matches the known results.

+

Pre-calibration: A hybrid uncertainty assessment method that involves identifying a plausible set of parameters using some prespecified screening criterion, such as the distance from the model results to the observations.

+

Prior: The best assessment of the probability of an event based on existing knowledge before a new experiment is conducted

+

Posterior: The revised or updated probability of an event after taking into account new information

+

Probabilistic inversion: Uses additional information, for instance, a probabilistic expert assessment or survey result, to update an existing prior distribution

+

Return level: A value that is expected to be equaled or exceeded on average once every interval of time (T) (with a probability of 1/T)

+

Return period: The estimated time interval between events of a similar size or intensity.

+

Sampling: The process of selecting model parameters or inputs that characterize the model uncertainty space.

+

Scenario discovery: Use of large ensembles of uncertain conditions to discover decision-relevant combinations of uncertain factors

+

Sensitivity analysis: Conducted to understand the factors and processes that most (or least) control a model’s outputs

+
+

Local sensitivity analysis: Model evaluation performed by varying uncertain factors around specific reference values

+

Global sensitivity analysis: Model evaluation performed by varying uncertain factors throughout their entire feasible value space

+
+

Uncertainty

+

Deep uncertainty: Refers to situations where the experts consulted on a decision do not know or cannot agree on system boundaries, the outcomes of interest and their relative importance, or the prior probability distributions for the various uncertain factors present

+

Epistemic uncertainty: Systematic uncertainty that comes about due to the lack of knowledge or data to choose the best model

+

Ontological uncertainty: Uncertainties due to processes, interactions, or futures, that are not contained within current conceptual models

+

Aleatory uncertainty: Uncertainty due to natural randomness in processes

+

Uncertainty characterization: Model evaluation under alternative factor hypotheses to explore their implications for model output uncertainty

+

Uncertainty quantification: Representation of model output uncertainty using probability distributions

+

Variance decomposition: A technique to partition how much of the variability in a model’s output is due to different explanatory variables.

+
+ + \ No newline at end of file diff --git a/dev/docs/html/A1.1_UQ_Introduction.html b/dev/docs/html/A1.1_UQ_Introduction.html new file mode 100644 index 0000000..abd0026 --- /dev/null +++ b/dev/docs/html/A1.1_UQ_Introduction.html @@ -0,0 +1,464 @@ + + + + + + + + + + + Introduction — Addressing Uncertainty in MultiSector Dynamics Research documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

Introduction#

+

As defined in Chapter 1, uncertainty quantification (UQ) refers to the formal focus on the full specification of likelihoods as well as distributional forms necessary to infer the joint probabilistic response across all modeled factors of interest [8]. This is in contrast to UC (the primary focus of the main document of this book), which is instead aimed at identifying which modeling choices yield the most consequential changes or outcomes and exploring alternative hypotheses related to the form and function of modeled systems [9, 10].

+

UQ is important for quantifying the relative merits of hypotheses for at least three main reasons. First, identifying model parameters that are consistent with observations is an important part of model development. Due to several effects, including correlations between parameters, simplified or incomplete model structures (relative to the full real-world dynamics), and uncertainty in the observations, many different combinations of parameter values can be consistent with the model structure and the observations to varying extents. Accounting for this uncertainty is conceptually preferable to selecting a single “best fit” parameter vector, particularly as consistency with historical or present observations does not necessarily guarantee skillful future projections.

+

The act of quantification requires specific assumptions about distributional forms and likelihoods, which may be more or less justified depending on prior information about the system or model behavior. As a result, UQ is well-suited for studies accounting for or addressing hypotheses related to systems with a relatively large amount of available data and models which are computationally inexpensive, particularly when the emphasis is on prediction. As shown in Fig. A.1, there is a fundamental tradeoff between the available number of model evaluations (for a fixed computational budget) and the number of parameters treated as uncertain. Sensitivity analyses are therefore part of a typical UQ workflow to identify which factors can be fixed and which ought to be prioritized in the UQ.

+
+Figure A1.1 + +
+

Overview of selected existing approaches for uncertainty quantification and their appropriateness given the number of uncertain model parameters and the number of available model simulations. Green shading denotes regions suitable for uncertainty quantification and red shading indicates regions more appropriate for uncertainty characterization.#

+
+
+

The choice of a particular UQ method depends on both the desired level of quantification and the ability to navigate the tradeoff between computational expense and the number of uncertain parameters (Fig. A.1). For example, Markov chain Monte Carlo with a full system model can provide an improved representation of uncertainty compared to the coarser pre-calibration approach [157], but requires many more model evaluations. The use of a surrogate model to approximate the full system model can reduce the number of needed model evaluations by several orders of magnitude, but the uncertainty quantification can only accommodate a limited number of parameters.

+

The remainder of this appendix will focus on introducing workflows for particular UQ methods, including a brief discussion of advantages and limitations.

+
+ + \ No newline at end of file diff --git a/dev/docs/html/A1.2_Parametric_Bootstrap.html b/dev/docs/html/A1.2_Parametric_Bootstrap.html new file mode 100644 index 0000000..9e4fb2e --- /dev/null +++ b/dev/docs/html/A1.2_Parametric_Bootstrap.html @@ -0,0 +1,462 @@ + + + + + + + + + + + Parametric Bootstrap — Addressing Uncertainty in MultiSector Dynamics Research documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

Parametric Bootstrap#

+

The parametric bootstrap [158] refers to a process of model recalibration to alternate realizations of the data. The bootstrap was originally developed to estimate standard errors and confidence intervals without ascertaining key assumptions that might not hold given the available data. In a setting where observations can be viewed as independent realizations of an underlying stochastic process, a sufficiently rich dataset can be treated as a population representing the data distribution. New datasets are then generated by resampling from the data with replacement, and the model can be refit to each new dataset using maximum-likelihood estimation. The resulting distribution of estimates can then be viewed as a representation of parametric uncertainty.

+

A typical workflow for the parametric bootstrap is shown in Fig. A.2. After identifying outputs of interest and preparing the data, the parametric model is fit by some procedure such as minimizing root-mean-square-error or maximizing the likelihood. Alternate datasets are constructed by resampling from the population or by generating new samples from the fitted data-generating process. It is important at this step that the resampled quantities are independent of one another. For example, in the context of temporally- or spatially-correlated data, such as time series, the raw observations cannot be treated as independent realizations. However, the residuals resulting from fitting the model to the data could be (depending on their structure). For example, if the residuals are treated as independent, they can then be resampled with replacement, and these residuals added to the original model fit to create new realizations. If the residuals are assumed to be the result of an autoregressive process, this process could be fit to the original residual series and new residuals be created using this model [159]. The model is then refit to each new realization.

+
+Figure A1.2 + +
+

Workflow for the parametric bootstrap.#

+
+
+

The bootstrap is computationally convenient, particularly as the process of fitting the model to each realization can be easily parallelized. This approach also requires minimal prior assumptions. However, due to the assumption that the available data are representative of the underlying data distribution, the bootstrap can neglect key uncertainties which might influence the results. For example, when using an autoregressive process to generate new residuals, uncertainty in the autocorrelation parameter and innovation variance is neglected, which may bias estimates of, for example, low-probability but high-impact events [160].
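A minimal sketch of a residual-resampling bootstrap for a simple linear model, assuming synthetic stand-in data; it follows the general workflow above (fit, resample residuals with replacement, refit) rather than any specific implementation from the cited literature.

```python
import numpy as np

rng = np.random.default_rng(10)
x = np.linspace(0.0, 10.0, 50)
y = 2.0 + 0.5 * x + rng.normal(0.0, 1.0, size=x.size)    # synthetic observations

# Fit the model and compute residuals
slope, intercept = np.polyfit(x, y, 1)
fitted = intercept + slope * x
residuals = y - fitted

# Resample residuals with replacement and refit the model to each alternate realization
n_boot = 2000
estimates = np.empty((n_boot, 2))
for b in range(n_boot):
    y_star = fitted + rng.choice(residuals, size=residuals.size, replace=True)
    estimates[b] = np.polyfit(x, y_star, 1)

# The spread of refitted parameters represents parametric uncertainty
print(np.percentile(estimates[:, 0], [2.5, 97.5]))        # interval for the slope
```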

+
+ + \ No newline at end of file diff --git a/dev/docs/html/A1.3_Precalibration.html b/dev/docs/html/A1.3_Precalibration.html new file mode 100644 index 0000000..d98ff50 --- /dev/null +++ b/dev/docs/html/A1.3_Precalibration.html @@ -0,0 +1,468 @@ + + + + + + + + + + + Pre-Calibration — Addressing Uncertainty in MultiSector Dynamics Research documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

Pre-Calibration#

+

Pre-calibration [18, 161, 162] involves the identification of a plausible set of parameters using some prespecified screening criterion, such as the distance from the model results to the observations (based on an appropriate metric for the desired matching features, such as root-mean-squared error). A typical workflow is shown in Fig. A.3. Parameter values are obtained by systematically sampling the input space (see Chapter 3.3). After the model is evaluated at the samples, only those passing the distance criterion are retained. This selects a subset of the parameter space as “plausible” based on the screening criterion, though there is no assignment of probabilities within this plausible region.

+
+Figure A1.3 + +
+

Workflow for pre-calibration.#

+
+
+

Pre-calibration can be useful for models which are inexpensive enough that a reasonable number of samples can be used to represent the parameter space, but which are too expensive to facilitate full uncertainty quantification. High-dimensional parameter spaces, which can be problematic for the uncertainty quantification methods below, may also be explored using pre-calibration. One key prerequisite to using this method is the ability to place a meaningful distance metric on the output space.

+

However, pre-calibration results in a very coarse characterization of uncertainty, especially when considering a large number of parameters, as more samples are needed to fully characterize the parameter space. Due to the inability to evaluate the relative probability of regions of the parameter space beyond the binary plausible-and-implausible characterization, pre-calibration can also result in degraded hindcast and projection skills and parameter estimates [157, 163, 164].

+

A related method, widely used in hydrological studies, is generalized likelihood uncertainty estimation, or GLUE [18]. Unlike pre-calibration, the underlying argument for GLUE relies on the concept of equifinality [165], which posits that it is impossible to find a uniquely well-performing parameter vector for models of abstract environmental systems [165, 166]. In other words, there exist multiple parameter vectors which perform equally or similarly well. As with pre-calibration, GLUE uses a goodness-of-fit measure (though this is called a “likelihood” in the GLUE literature, as opposed to a statistical likelihood function [167]) to evaluate samples. After setting a threshold of acceptable performance with respect to that measure, samples are evaluated and classified into “behavioral” or “non-behavioral” according to the threshold.
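A minimal sketch of the screening step shared by pre-calibration and GLUE, assuming a hypothetical two-parameter model, synthetic observations, and an arbitrary RMSE threshold: sampled parameter sets are retained as plausible (or behavioral) only if they pass the criterion.

```python
import numpy as np

rng = np.random.default_rng(5)
t = np.arange(30)
obs = 1.5 * np.exp(-0.1 * t) + rng.normal(0.0, 0.05, size=t.size)   # synthetic observations

def model(a, k):
    """Hypothetical two-parameter model of the observed process."""
    return a * np.exp(-k * t)

# Systematically sample the parameter space (simple uniform sampling here)
samples = rng.uniform([0.5, 0.01], [3.0, 0.5], size=(10_000, 2))
rmse = np.array([np.sqrt(np.mean((model(a, k) - obs) ** 2)) for a, k in samples])

# Retain only the parameter sets passing the prespecified screening criterion
plausible = samples[rmse < 0.1]
print(len(plausible), "of", len(samples), "samples retained as plausible")
```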

+
+

Note

+

Put this into practice! Click the following badge to try out an interactive tutorial on utilizing Pre-Calibration and GLUE for HYMOD model calibration: Pre-Calibration Jupyter Notebook

+
+
+ + \ No newline at end of file diff --git a/dev/docs/html/A1.4_Markov_Chain_Monte_Carlo.html b/dev/docs/html/A1.4_Markov_Chain_Monte_Carlo.html new file mode 100644 index 0000000..5397a2c --- /dev/null +++ b/dev/docs/html/A1.4_Markov_Chain_Monte_Carlo.html @@ -0,0 +1,465 @@ + + + + + + + + + + + Markov Chain Monte Carlo — Addressing Uncertainty in MultiSector Dynamics Research documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

Markov Chain Monte Carlo#

+

Markov chain Monte Carlo (MCMC) is a “gold standard” approach to full uncertainty quantification. MCMC refers to a category of algorithms which systematically sample from a target distribution (in this case, the posterior distribution) by constructing a Markov chain. A Markov chain is a probabilistic structure consisting of a state space, an initial probability distribution over the states, and a transition distribution between states. If a Markov chain satisfies certain properties [168, 169], the probability of being in each state will eventually converge to a stable, or stationary, distribution, regardless of the initial probabilities.

+

MCMC algorithms construct a Markov chain of samples from a parameter space (the combination of model and statistical parameters). This Markov chain is constructed so that the stationary distribution is a target distribution, in this case the (Bayesian) posterior distribution. As a result, after the transient period, the resulting samples can be viewed as a set of dependent samples from the posterior (the dependence is due to the autocorrelation between samples resulting from the Markov chain transitions). Expected values can be computed from these samples (for example, using batch-means estimators [170]), or the chain can be sub-sampled or thinned and the resulting samples used as independent Monte Carlo samples due to the reduced or eliminated autocorrelation.

+

A general workflow for MCMC is shown in Fig. A.4. The first decision is whether to use the full model or a surrogate model (or emulator). Typical surrogates include Gaussian process emulation [171, 172], polynomial chaos expansions [173, 174], support vector machines [175, 176], and neural networks [177, 178]. Surrogate modeling can be faster, but requires a sufficient number of model evaluations for the surrogate to accurately represent the model’s response surface, and this typically limits the number of parameters which can be included in the analysis.

+
+Figure A1.4 + +
+

Workflow for Markov chain Monte Carlo.#

+
+
+

After selecting the variables which will be treated as uncertain, the next step is to specify the likelihood based on the selected surrogate model or the structure of the data-model residuals. For example, it may not always be appropriate to treat the residuals as independent and identically distributed (as is commonly done in linear regression). A mis-specification of the residual structure can result in biases and over- or under-confident inferences and projections [179].

+

After specifying the prior distributions (see Chapter A.6), the selected MCMC algorithm should be used to draw samples from the posterior distribution. There are many MCMC algorithms, all of which have advantages and disadvantages for a particular problem. These include the Metropolis-Hastings algorithm [169] and Hamiltonian Monte Carlo [180, 181]. Software packages typically implement one MCMC method, sometimes designed for a particular problem setting or likelihood specification. For example, R’s adaptMCMC implements an adaptive Metropolis-Hastings algorithm [182], while NIMBLE [183, 184] uses a user-customizable Metropolis-Hastings implementation, as well as functionality for Gibbs sampling (a special case of Metropolis-Hastings in which proposals are drawn from the full conditional distributions, which requires that these have a convenient mathematical form). Some recent implementations, such as Stan [185], pyMC3 [186], and Turing [187] allow different algorithms to be used.
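A minimal sketch of a random-walk Metropolis-Hastings sampler for the mean of a Gaussian with known variance, assuming synthetic data, a weakly informative prior, and an arbitrary proposal scale; production analyses would instead use one of the packages named above together with formal convergence diagnostics.

```python
import numpy as np
from scipy import stats

rng = np.random.default_rng(11)
data = rng.normal(2.0, 1.0, size=50)                 # synthetic observations (sigma = 1 known)

def log_posterior(mu):
    log_prior = stats.norm(0.0, 10.0).logpdf(mu)             # weakly informative prior
    log_like = stats.norm(mu, 1.0).logpdf(data).sum()        # Gaussian likelihood
    return log_prior + log_like

n_iter, step = 10_000, 0.3
chain = np.empty(n_iter)
chain[0] = 0.0
for i in range(1, n_iter):
    proposal = chain[i - 1] + rng.normal(0.0, step)          # random-walk proposal
    log_alpha = log_posterior(proposal) - log_posterior(chain[i - 1])
    chain[i] = proposal if np.log(rng.uniform()) < log_alpha else chain[i - 1]

posterior = chain[2000:]          # discard the transient (burn-in) portion of the chain
print(posterior.mean(), posterior.std())
```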

+

A main consideration when using MCMC algorithms is testing for convergence to the target distribution. As convergence is guaranteed only for a sufficiently large number of transitions, it is impossible to conclude for certain that a chain has converged for a fixed number of iterations. However, several heuristics have been developed [170, 188] to increase evidence that convergence has occurred.

+
+ + \ No newline at end of file diff --git a/dev/docs/html/A1.5_Other_methods.html b/dev/docs/html/A1.5_Other_methods.html new file mode 100644 index 0000000..dcce9e8 --- /dev/null +++ b/dev/docs/html/A1.5_Other_methods.html @@ -0,0 +1,454 @@ + + + + + + + + + + + Other Methods — Addressing Uncertainty in MultiSector Dynamics Research documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

Other Methods#

+

Other common methods for UQ exist. These include sequential Monte Carlo, otherwise known as particle filtering [189, 190, 191], in which an ensemble of weighted particles (samples) is propagated and re-weighted to approximate the target distribution. An advantage of sequential Monte Carlo is that the vast majority of the computation can be parallelized, unlike with standard MCMC. A major weakness is the potential for degeneracy [190], where many particles have extremely small weights, resulting in the effective use of only a few samples.

+

Another method is approximate Bayesian computation (ABC) [192, 193, 194]. ABC is a likelihood-free approach that compares model output to a set of summary statistics. ABC is therefore well-suited for models and residual structures which do not lend themselves to a computationally-tractable likelihood, but the resulting inferences are known to be biased if the set of summary statistics is not sufficient, which can be difficult to know a priori.
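A minimal sketch of rejection-sampling ABC for the mean of a Gaussian, assuming a single summary statistic (the sample mean), a uniform prior, and an arbitrary tolerance; all values are illustrative stand-ins.

```python
import numpy as np

rng = np.random.default_rng(21)
obs = rng.normal(2.0, 1.0, size=100)          # synthetic observations
obs_summary = obs.mean()                      # summary statistic of the observations

tolerance, accepted = 0.05, []
for _ in range(50_000):
    mu = rng.uniform(-5.0, 5.0)                    # draw a candidate from the prior
    sim = rng.normal(mu, 1.0, size=obs.size)       # simulate a dataset from the model
    if abs(sim.mean() - obs_summary) < tolerance:  # keep candidates matching the summary
        accepted.append(mu)

print(len(accepted), np.mean(accepted))
```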

+
+ + \ No newline at end of file diff --git a/dev/docs/html/A1.6_Critical_first_step.html b/dev/docs/html/A1.6_Critical_first_step.html new file mode 100644 index 0000000..8271533 --- /dev/null +++ b/dev/docs/html/A1.6_Critical_first_step.html @@ -0,0 +1,464 @@ + + + + + + + + + + + The Critical First Step: How to Choose a Prior Distribution — Addressing Uncertainty in MultiSector Dynamics Research documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

The Critical First Step: How to Choose a Prior Distribution#

+

Prior distributions play an important role in Bayesian uncertainty quantification, particularly when data are limited relative to the dimension of the model. Bayesian updating can be thought of as an information filter, where each additional datum is added to the information contained in the prior; eventually, the prior makes relatively little impact. In real-world problems, it can be extremely difficult to assess how much data is required for the choice of prior to become less relevant. The choice of prior can also be influential when conducting SA prior to, or without, UQ. This is because a prior for a given parameter that is much wider than the region where the model response surface is actually sensitive to that parameter can cause the sensitivity calculation to underestimate the response in that potentially critical region. Similarly, a prior which is too narrow may miss regions where the model responds to the parameter altogether.

+

Ideally, prior distributions are constructed independently of any analysis of the new data considered. This is because using data to inform the prior as well as to compute the likelihood reuses information in a potentially inappropriate way, which can lead to overconfident inferences. Following Jaynes [195], Gelman et al. [196] refers to the ideal prior as one which encodes all available information about the model. For practical reasons (difficulty of construction or computational inconvenience), most priors fail to achieve this ideal. These compromises mean that priors should be transparently articulated and justified, so that the impact of the choice of prior can be fully understood. When there is ambiguity about an appropriate prior, such as how fat the tails should be, an analyst should examine how sensitive the UQ results are to the choice of prior.

+

Priors can also be classified in terms of the information encoded by them, demonstrated in Fig. A.5. Non-informative priors (illustrated in Fig. A.5 (a)) allegedly correspond to (and are frequently justified by) a position of ignorance. A classic example is the use of a uniform distribution. A uniform prior can, however, be problematic, as it can lead to improper inferences by giving extremely large values the same prior probability as values which may seem more likely [196, 197], and therefore does not really reflect a state of complete ignorance. In the extreme case of a uniform prior over the entire real line, every particular region has effectively a prior weight of zero, even though not all regions are a priori unlikely [197]. Moreover, a uniform prior which excludes possible parameter values is not actually noninformative, as it assigns zero probability to those values while jumping to a nonzero probability as soon as the boundary is crossed. While a uniform prior can be problematic for the task of uncertainty quantification, it may be useful for an initial sensitivity analysis to identify the boundary of any regions where the model is sensitive to the parameter.

+
+Figure A1.5 + +
+

Impact of priors on posterior inferences. These plots show the results of inference for a linear regression model with 15 data points. The true value of the parameter is equal to -3. All priors have mean 0. In panel (a), a non-informative prior allows the tails of the posterior to extend freely, which may result in unreasonably large parameter values. In panel (b), a weakly informative prior constrains the tails more, but allows them to extend without too much restriction. In panel (c), an informative prior strongly constrains the tails of the posterior and biases the inference closer towards the prior mean (the posterior mean is -0.89 in this case, and closer to -3 in the other two cases).#

+
+
+

Informative priors strongly bound the range of probable values (illustrated in Fig. A.5 (c)). One example is a Gaussian distribution with a relatively small standard deviation, so that large values are assigned a close to null prior probability. Another example is the jump from zero to non-zero probability occurring at the truncation point of a truncated Gaussian, which could be justified based on information that the parameter cannot take on values beyond this point. Without this type of justification, however, priors may be too informative, failing to allow the information contained in the available data to update them.

+

Finally, weakly informative priors (illustrated in Fig. A.5 (b)) fall in between [196]. They regularize better than non-informative priors, but allow for more inference flexibility than fully informative priors. An example might be a Gaussian distribution with a moderate standard deviation, which still assigns negligible probability for values far away from the mean, but is less constrained than a narrow Gaussian for a reasonably large area. A key note is that it is not necessarily better to be more informative if this cannot be justified by the available information.
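The sketch below contrasts the three kinds of priors discussed above for a single regression coefficient, using scipy.stats; the specific distributions and scales are illustrative assumptions, and the printed tail mass shows how much prior weight each choice places on implausibly large values.

```python
from scipy import stats

priors = {
    "non-informative":    stats.uniform(loc=-100.0, scale=200.0),   # flat over a wide range
    "weakly informative": stats.norm(loc=0.0, scale=5.0),           # diffuse normal
    "informative":        stats.norm(loc=0.0, scale=0.5),           # tightly constrained
}

for name, prior in priors.items():
    # Prior probability assigned to "large" coefficient values, here |beta| > 10
    tail = prior.sf(10.0) + prior.cdf(-10.0)
    print(f"{name:>20s}: P(|beta| > 10) = {tail:.3f}")
```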

+
+ + \ No newline at end of file diff --git a/dev/docs/html/A1.7_Critical_final_step.html b/dev/docs/html/A1.7_Critical_final_step.html new file mode 100644 index 0000000..4e67de2 --- /dev/null +++ b/dev/docs/html/A1.7_Critical_final_step.html @@ -0,0 +1,456 @@ + + + + + + + + + + + The Critical Final Step: Predictive Checks — Addressing Uncertainty in MultiSector Dynamics Research documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

The Critical Final Step: Predictive Checks#

+

Every UQ workflow requires a number of choices, potentially including selecting prior distributions, the likelihood specification, and any used numerical models. Checking the appropriateness of these choices is an essential step for sound inferences, as misspecification can produce biased results [179]. Model checking in this fashion is part of an iterative UQ process, as the results can reveal adjustments to the statistical model or the need to select a different numerical model [198, 199, 200].

+

A classic example is the need to check the structure of residuals for correlations. Many standard statistical models, such as linear regression, assume that the residuals are independent and identically distributed from the error distribution. The presence of correlations, including temporal autocorrelations and spatial correlations, indicates a structural mismatch between the likelihood and the data. In these cases, the likelihood should be adjusted to account for these correlations.

+

Checking residuals in this fashion is one example of a predictive check (or a posterior predictive check in the Bayesian setting). One way to view UQ is as a means to recover data-generating processes (associated with each parameter vector) consistent with the observations. Predictive checks compare the inferred data-generating process to the observations to determine whether the model is capable of appropriately capturing uncertainty. After conducting the UQ analysis, alternatively realized datasets are simulated from sampled parameters. These alternative datasets, or their summary statistics, can be tested against the observations to determine adequacy of the fit. Predictive checks are therefore a way of probing various model components to identify shortcomings that might result in biased inferences or poor projections, depending on the goal of the analysis.

+

One example of a graphical predictive check for time series models is hindcasting, where predictive intervals are constructed from the alternative datasets and plotted along with the data. Hindcasts demonstrate how well the model is capable of capturing the broader dynamics of the data, as well as whether the parameter distributions produce appropriate levels of output uncertainty. A related quantitative check is the surprise index, which calculates the percentage of data points located within a fixed predictive interval. For example, the 90% predictive interval should contain approximately 90% of the data. More uncertainty than this reflects underconfidence, while less uncertainty reflects overconfidence. This could be the result of priors that are not appropriately informative, or a likelihood that does not account for correlations between data points appropriately. It could also be the result of a numerical model that isn’t sufficiently sensitive to the parameters that are treated as uncertain.
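A minimal sketch of the surprise index described above, assuming a matrix of posterior-predictive replicates (one row per sampled parameter vector) as a stand-in for the output of an actual UQ analysis: it computes the share of observations falling inside the 90% predictive interval.

```python
import numpy as np

rng = np.random.default_rng(8)
obs = rng.normal(0.0, 1.0, size=200)                     # observations
replicates = rng.normal(0.0, 1.1, size=(4000, 200))      # posterior-predictive datasets

# 90% predictive interval for each observation, built from the replicates
lower = np.percentile(replicates, 5.0, axis=0)
upper = np.percentile(replicates, 95.0, axis=0)

surprise_index = np.mean((obs >= lower) & (obs <= upper))
# Values near 0.90 indicate well-calibrated uncertainty; substantially higher values
# suggest underconfidence, while substantially lower values suggest overconfidence.
print(surprise_index)
```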

+
+ + \ No newline at end of file diff --git a/dev/docs/html/A1.8_Key_take_home.html b/dev/docs/html/A1.8_Key_take_home.html new file mode 100644 index 0000000..f29f6da --- /dev/null +++ b/dev/docs/html/A1.8_Key_take_home.html @@ -0,0 +1,462 @@ + + + + + + + + + + + Key Take-Home Points — Addressing Uncertainty in MultiSector Dynamics Research documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

Key Take-Home Points#

+

When appropriate, UQ is an important component of the exploratory modeling workflow. While a number of parameter sets could be consistent with observations, they may result in divergent model outputs when exposed to different future conditions. This can result in identifying risks which are not visible when selecting a “best fit” parameterization. Quantifying uncertainties also allows us to quantify the support for hypotheses, which is an essential part of the scientific process.

+

Due to the scale and complexity of the experiments taking place in IM3, UQ has not been extensively used. The tradeoff between the available number of function evaluations and the number of uncertain parameters illustrated in Fig. A.1 is particularly challenging due to the increasing complexity of state-of-the-art models and the movement towards coupled, multisector models. This tradeoff can be addressed somewhat through the use of emulators and parallelizable methods. In particular, when attempting to navigate this tradeoff by limiting the number of uncertain parameters, it is important to carefully iterate with sensitivity analyses to ensure that critical parameters are identified.

+

Specifying prior distributions and likelihoods is an ongoing challenge. Prior distributions, in particular, should be treated as deeply uncertain when appropriate. One key advantage of the methods described in this chapter is that they have the potential for increased transparency. When it is not possible to conduct a sensitivity analysis on a number of critical priors due to limited computational budgets, fully specifying and providing a justification for the utilized distributions allows other researchers to identify key assumptions and build on existing work. The same is true for the specification of likelihoods—while likelihood-free methods avoid the need to specify a likelihood function, they require other assumptions or choices, which should be described and justified as transparently as possible.

+

We conclude this appendix with some key recommendations:
1. UQ analysis does not require full confidence in priors and likelihoods. Rather, UQ should be treated as part of an exploratory modeling workflow, where hypotheses related to model structures, prior distributions, and likelihoods can be tested.
2. For complex multisectoral models, UQ will typically require the use of a reduced set of parameters, either through emulation or by fixing the others to their best-fit values. These parameters should be selected through a careful sensitivity analysis.
3. Avoid the use of supposedly “non-informative” priors, such as uniform priors, whenever possible. In the absence of strong information about parameter values, the use of weakly informative priors, such as diffuse normals, is preferable.
4. Be cognizant of the limitations of the conclusions that can be drawn using each method. The bootstrap, for example, may result in overconfidence if the dataset is limited and not truly representative of the underlying stochastic process.
5. When using MCMC, Markov chains cannot be shown to have converged to the target distribution; rather, evidence can be collected to demonstrate that convergence is likely.
6. Conduct predictive checks based on the assumptions underlying the choices made in the analysis, and iteratively update those choices if the assumptions prove ill-suited for the problem at hand.

+
+ + \ No newline at end of file diff --git a/dev/docs/html/A1_Uncertainty_Quantification.html b/dev/docs/html/A1_Uncertainty_Quantification.html new file mode 100644 index 0000000..465f452 --- /dev/null +++ b/dev/docs/html/A1_Uncertainty_Quantification.html @@ -0,0 +1,612 @@ + + + + + + + + + + + 1. Uncertainty Quantification — Addressing Uncertainty in MultiSector Dynamics Research documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

A. Uncertainty Quantification#

+
+

A.1. Introduction#

+

As defined in Chapter 1, uncertainty quantification (UQ) refers to the formal focus on the full specification of likelihoods as well as distributional forms necessary to infer the joint probabilistic response across all modeled factors of interest [8]. This is in contrast to UC (the primary focus of the main document of this book), which is instead aimed at identifying which modeling choices yield the most consequential changes or outcomes and exploring alternative hypotheses related to the form and function of modeled systems [9, 10].

+

UQ is important for quantifying the relative merits of hypotheses for at least three main reasons. First, identifying model parameters that are consistent with observations is an important part of model development. Due to several effects, including correlations between parameters, simplified or incomplete model structures (relative to the full real-world dynamics), and uncertainty in the observations, many different combinations of parameter values can be consistent with the model structure and the observations to varying extents. Accounting for this uncertainty is conceptually preferable to selecting a single “best fit” parameter vector, particularly as consistency with historical or present observations does not necessarily guarantee skillful future projections.

+

The act of quantification requires specific assumptions about distributional forms and likelihoods, which may be more or less justified depending on prior information about the system or model behavior. As a result, UQ is well-suited for studies accounting for or addressing hypotheses related to systems with a relatively large amount of available data and models which are computationally inexpensive, particularly when the emphasis is on prediction. As shown in Fig. A.1, there is a fundamental tradeoff between the available number of model evaluations (for a fixed computational budget) and the number of parameters treated as uncertain. Sensitivity analyses are therefore part of a typical UQ workflow to identify which factors can be fixed and which ought to be prioritized in the UQ.

+
+Figure A1.1 + +
+

Fig. A.1 Overview of selected existing approaches for uncertainty quantification and their appropriateness given the number of uncertain model parameters and the number of available model simulations. Green shading denotes regions suitable for uncertainty quantification and red shading indicates regions more appropriate for uncertainty characterization.#

+
+
+

The choice of a particular UQ method depends on both the desired level of quantification and the ability to navigate the tradeoff between computational expense and the number of uncertain parameters (Fig. A.1). For example, Markov chain Monte Carlo with a full system model can provide an improved representation of uncertainty compared to the coarser pre-calibration approach [157], but requires many more model evaluations. The use of a surrogate model to approximate the full system model can reduce the number of needed model evaluations by several orders of magnitude, but the uncertainty quantification can only accommodate a limited number of parameters.

+

The remainder of this appendix will focus on introducing workflows for particular UQ methods, including a brief discussion of advantages and limitations.

+
+
+

A.2. Parametric Bootstrap#

+

The parametric bootstrap [158] refers to a process of model recalibration to alternate realizations of the data. The bootstrap was originally developed to estimate standard errors and confidence intervals without ascertaining key assumptions that might not hold given the available data. In a setting where observations can be viewed as independent realizations of an underlying stochastic process, a sufficiently rich dataset can be treated as a population representing the data distribution. New datasets are then generated by resampling from the data with replacement, and the model can be refit to each new dataset using maximum-likelihood estimation. The resulting distribution of estimates can then be viewed as a representation of parametric uncertainty.

+

A typical workflow for the parametric bootstrap is shown in Fig. A.2. After identifying outputs of interest and preparing the data, the parametric model is fit by some procedure such as minimizing root-mean-square-error or maximizing the likelihood. Alternate datasets are constructed by resampling from the population or by generating new samples from the fitted data-generating process. It is important at this step that the resampled quantities are independent of one another. For example, in the context of temporally- or spatially-correlated data, such as time series, the raw observations cannot be treated as independent realizations. However, the residuals resulting from fitting the model to the data could be (depending on their structure). For example, if the residuals are treated as independent, they can then be resampled with replacement, and these residuals added to the original model fit to create new realizations. If the residuals are assumed to be the result of an autoregressive process, this process could be fit to the original residual series and new residuals be created using this model [159]. The model is then refit to each new realization.

+
+Figure A1.2 + +
+

Fig. A.2 Workflow for the parametric bootstrap.#

+
+
+

The bootstrap is computationally convenient, particularly as the process of fitting the model to each realization can be easily parallelized. This approach also requires minimal prior assumptions. However, due to the assumption that the available data are representative of the underlying data distribution, the bootstrap can neglect key uncertainties which might influence the results. For example, when using an autoregressive process to generate new residuals, uncertainty in the autocorrelation parameter and innovation variance is neglected, which may bias estimates of, for example, low-probability but high-impact events [160].

+
+
+

A.3. Pre-Calibration#

+

Pre-calibration [18, 161, 162] involves the identification of a plausible set of parameters using some prespecified screening criterion, such as the distance from the model results to the observations (based on an appropriate metric for the desired matching features, such as root-mean-squared error). A typical workflow is shown in Fig. A.3. Parameter values are obtained by systematically sampling the input space (see Chapter 3.3). After the model is evaluated at the samples, only those passing the distance criterion are retained. This selects a subset of the parameter space as “plausible” based on the screening criterion, though there is no assignment of probabilities within this plausible region.

+
+Figure A1.3 + +
+

Fig. A.3 Workflow for pre-calibration.#
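The screening step can be sketched on a toy exponential-decay model as below; the sampling ranges, the distance metric (root-mean-square error), and the plausibility threshold are illustrative assumptions.

import numpy as np

rng = np.random.default_rng(1)
t = np.linspace(0, 10, 25)
observations = 2.0 * np.exp(-0.3 * t) + rng.normal(scale=0.05, size=t.size)

def toy_model(amplitude, decay):
    return amplitude * np.exp(-decay * t)

# systematically sample the two-dimensional input space (uniform random here)
samples = np.column_stack([rng.uniform(0.5, 5.0, 5000),    # amplitude
                           rng.uniform(0.05, 1.0, 5000)])  # decay rate

# evaluate the model at each sample and compute the distance to the observations
rmse = np.array([np.sqrt(np.mean((toy_model(a, d) - observations) ** 2))
                 for a, d in samples])

# retain only the samples passing the screening criterion
plausible = samples[rmse < 0.1]
print(f"{len(plausible)} of {len(samples)} samples classified as plausible")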

+
+
+

Pre-calibration can be useful for models which are inexpensive enough that a reasonable +number of samples can be used to represent the parameter space, but which are too expensive to facilitate full uncertainty quantification. High-dimensional parameter spaces, which can be problematic for the uncertainty quantification methods below, may also be explored using pre-calibration. One key prerequisite to using this method is the ability to place a meaningful distance metric on the output space.

+

However, pre-calibration results in a very coarse characterization of uncertainty, especially when considering a large number of parameters, as more samples are needed to fully characterize the parameter space. Due to the inability to evaluate the relative probability of regions of the parameter space beyond the binary plausible-and-implausible characterization, pre-calibration can also result in degraded hindcast and projection skills and parameter estimates [157, 163, 164].

+

A related method, widely used in hydrological studies, is generalized likelihood uncertainty estimation, or GLUE [18]. Unlike pre-calibration, the underlying argument for GLUE relies on the concept of equifinality [165], which posits that it is impossible to find a uniquely well-performing parameter vector for models of abstract environmental systems [165, 166]. In other words, there exist multiple parameter vectors which perform equally or similarly well. As with pre-calibration, GLUE uses a goodness-of-fit measure (though this is called a “likelihood” in the GLUE literature, as opposed to a statistical likelihood function [167]) to evaluate samples. After setting a threshold of acceptable performance with respect to that measure, samples are evaluated and classified into “behavioral” or “non-behavioral” according to the threshold.
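Continuing the toy pre-calibration sketch above, a GLUE-style classification and weighting step might look like the following; the behavioral threshold and the informal “likelihood” measure are illustrative choices, and in practice the weights are usually used to construct prediction quantiles rather than just a weighted mean.

# classify samples as behavioral or non-behavioral using the same toy example as above
behavioral = rmse < 0.1
glue_weights = 1.0 / rmse[behavioral] ** 2        # informal "likelihood" measure (illustrative)
glue_weights /= glue_weights.sum()                # normalize over the behavioral set

# likelihood-weighted prediction from the behavioral parameter sets
predictions = np.array([toy_model(a, d) for a, d in samples[behavioral]])
weighted_mean_prediction = glue_weights @ predictions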

+
+

Note

+

Put this into practice! Click the following badge to try out an interactive tutorial on utilizing Pre-Calibration and GLUE for HYMOD model calibration: Pre-Calibration Jupyter Notebook

+
+
+
+

A.4. Markov Chain Monte Carlo#

+

Markov chain Monte Carlo (MCMC) is a “gold standard” approach to full uncertainty quantification. MCMC refers to a category of algorithms which systematically sample from a target distribution (in this case, the posterior distribution) by constructing a Markov chain. A Markov chain is a probabilistic structure consisting of a state space, an initial probability distribution over the states, and a transition distribution between states. If a Markov chain satisfies certain properties [168, 169], the probability of being in each state will eventually converge to a stable, or stationary, distribution, regardless of the initial probabilities.

+

MCMC algorithms construct a Markov chain of samples from a parameter space (the combination of model and statistical parameters). This Markov chain is constructed so that the stationary distribution is a target distribution, in this case the (Bayesian) posterior distribution. As a result, after the transient period, the resulting samples can be viewed as a set of dependent samples from the posterior (the dependence is due to the autocorrelation between samples resulting from the Markov chain transitions). Expected values can be computed from these samples (for example, using batch-means estimators [170]), or the chain can be sub-sampled or thinned and the resulting samples used as independent Monte Carlo samples due to the reduced or eliminated autocorrelation.

+

A general workflow for MCMC is shown in Fig. A.4. The first decision is whether to use the full model or a surrogate model (or emulator). Typical surrogates include Gaussian process emulation [171, 172], polynomial chaos expansions [173, 174], support vector machines [175, 176], and neural networks [177, 178]. Surrogate modeling can be faster, but requires a sufficient number of model evaluations for the surrogate to accurately represent the model’s response surface, and this typically limits the number of parameters which can be included in the analysis.

+
+Figure A1.4 + +
+

Fig. A.4 Workflow for Markov chain Monte Carlo.#

+
+
+

After selecting the variables which will be treated as uncertain, the next step is to specify the likelihood based on the selected surrogate model or the structure of the data-model residuals. For example, it may not always be appropriate to treat the residuals as independent and identically distributed (as is commonly done in linear regression). A mis-specification of the residual structure can result in biases and over- or under-confident inferences and projections [179].

+

After specifying the prior distributions (see Chapter A.6), the selected MCMC algorithm should be used to draw samples from the posterior distribution. There are many MCMC algorithms, each with advantages and disadvantages for a particular problem. These include the Metropolis-Hastings algorithm [169] and Hamiltonian Monte Carlo [180, 181]. Software packages typically implement one MCMC method, sometimes designed for a particular problem setting or likelihood specification. For example, R’s adaptMCMC implements an adaptive Metropolis-Hastings algorithm [182], while NIMBLE [183, 184] provides a user-customizable Metropolis-Hastings implementation, as well as functionality for Gibbs sampling (a special case of Metropolis-Hastings in which proposals are drawn from the full conditional distributions, which is convenient when the priors have mathematically convenient, e.g. conjugate, forms). Some recent implementations, such as Stan [185], pyMC3 [186], and Turing [187], allow different algorithms to be used.
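As a deliberately minimal illustration of the sampling step (a generic random-walk Metropolis sketch, not the implementation used by any of the packages above), the code below targets the posterior for the mean of normally distributed data; the prior, proposal step size, and burn-in length are illustrative.

import numpy as np

rng = np.random.default_rng(0)
data = rng.normal(loc=2.0, scale=1.0, size=30)      # synthetic observations

def log_posterior(mu):
    log_prior = -0.5 * (mu / 10.0) ** 2             # Normal(0, 10^2) prior on the mean
    log_like = -0.5 * np.sum((data - mu) ** 2)      # Normal(mu, 1) likelihood
    return log_prior + log_like

n_iter, step = 10_000, 0.5
samples = np.empty(n_iter)
mu_current = 0.0
for i in range(n_iter):
    mu_proposed = mu_current + rng.normal(scale=step)        # random-walk proposal
    if np.log(rng.uniform()) < log_posterior(mu_proposed) - log_posterior(mu_current):
        mu_current = mu_proposed                             # accept; otherwise keep current value
    samples[i] = mu_current

posterior_draws = samples[2_000:]                            # discard the transient period
print("posterior mean estimate:", posterior_draws.mean())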

+

A main consideration when using MCMC algorithms is testing for convergence to the target distribution. As convergence is guaranteed only for a sufficiently large number of transitions, it is impossible to conclude for certain that a chain has converged for a fixed number of iterations. However, several heuristics have been developed [170, 188] to increase evidence that convergence has occurred.
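One widely used heuristic is the Gelman-Rubin potential scale reduction factor, computed from several independently initialized chains; the sketch below uses synthetic draws as stand-ins for real MCMC output. Values near 1 are consistent with, but do not prove, convergence.

import numpy as np

def gelman_rubin(chains):
    # chains: array of shape (n_chains, n_samples)
    n = chains.shape[1]
    within = chains.var(axis=1, ddof=1).mean()        # W: mean within-chain variance
    between = n * chains.mean(axis=1).var(ddof=1)     # B: between-chain variance
    var_hat = (n - 1) / n * within + between / n      # pooled variance estimate
    return np.sqrt(var_hat / within)

rng = np.random.default_rng(0)
chains = rng.normal(size=(4, 5000))                   # four synthetic "chains"
print("R-hat:", gelman_rubin(chains))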

+
+
+

A.5. Other Methods#

+

Other common methods for UQ exist. These include sequential Monte Carlo, otherwise known as particle filtering [189, 190, 191], in which a population of particles (samples) is propagated and reweighted as observations are sequentially assimilated. An advantage of sequential Monte Carlo is that the vast majority of the computation can be parallelized, unlike with standard MCMC. A major weakness is the potential for degeneracy [190], where many particles have extremely small weights, resulting in the effective use of only a few samples.
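A minimal bootstrap particle filter for a synthetic AR(1) state-space model is sketched below (all parameter values are illustrative); it tracks the effective sample size, which collapses when many weights become negligible, and resamples when degeneracy sets in.

import numpy as np

rng = np.random.default_rng(0)
T, N = 50, 2000

# simulate a synthetic AR(1) state with noisy observations
x = np.zeros(T)
for t in range(1, T):
    x[t] = 0.8 * x[t - 1] + rng.normal(scale=1.0)
y = x + rng.normal(scale=0.5, size=T)

# bootstrap particle filter
particles = rng.normal(scale=1.0, size=N)
weights = np.full(N, 1.0 / N)
for t in range(T):
    particles = 0.8 * particles + rng.normal(scale=1.0, size=N)     # propagate each particle
    weights *= np.exp(-0.5 * ((y[t] - particles) / 0.5) ** 2)       # reweight by the likelihood
    weights /= weights.sum()
    ess = 1.0 / np.sum(weights ** 2)                                # effective sample size
    if ess < N / 2:                                                 # resample when weights degenerate
        idx = rng.choice(N, size=N, p=weights)
        particles, weights = particles[idx], np.full(N, 1.0 / N)
    state_estimate = np.sum(weights * particles)                    # filtered estimate at time t

print("final filtered state estimate:", state_estimate)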

+

Another method is approximate Bayesian computation (ABC) [192, 193, 194]. ABC is a likelihood-free approach that compares summary statistics of the model output to those of the observations. ABC is therefore well-suited for models and residual structures which do not lend themselves to a computationally-tractable likelihood, but the resulting inferences are known to be biased if the set of summary statistics is not sufficient, which can be difficult to know a priori.
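A rejection-ABC sketch for a toy problem (inferring the mean of a normal distribution) is shown below; the prior, the summary statistic, and the tolerance are illustrative choices, and the quality of the approximation hinges on how informative the summary statistic is.

import numpy as np

rng = np.random.default_rng(0)
observed = rng.normal(loc=1.5, scale=1.0, size=100)
obs_summary = observed.mean()                          # summary statistic of the observations

accepted = []
for _ in range(50_000):
    theta = rng.uniform(-5, 5)                         # draw a candidate from the prior
    simulated = rng.normal(loc=theta, scale=1.0, size=100)
    if abs(simulated.mean() - obs_summary) < 0.05:     # accept if the summaries are close
        accepted.append(theta)

accepted = np.array(accepted)
print(f"accepted {accepted.size} draws; approximate posterior mean = {accepted.mean():.2f}")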

+
+
+

A.6. The Critical First Step: How to Choose a Prior Distribution#

+

Prior distributions play an important role in Bayesian uncertainty quantification, particularly when data are limited relative to the dimension of the model. Bayesian updating can be thought of as an information filter, where each additional datum adds to the information contained in the prior; eventually, the prior makes relatively little impact. In real-world problems, it can be extremely difficult to assess how much data is required for the choice of prior to become less relevant. The choice of prior can also be influential when conducting SA prior to, or without, UQ. If the prior for a given parameter is much wider than the region where the model response surface is sensitive to that parameter, the sensitivity calculation may underestimate the response in that potentially critical region. Similarly, a prior which is too narrow may miss regions where the model responds to the parameter altogether.

+

Ideally, prior distributions are constructed independently of any analysis of the new data considered. This is because using data to inform the prior as well as to compute the likelihood reuses information in a potentially inappropriate way, which can lead to overconfident inferences. Following Jaynes [195], Gelman et al. [196] refers to the ideal prior as one which encodes all available information about the model. For practical reasons (difficulty of construction or computational inconvenience), most priors fail to achieve this ideal. These compromises mean that priors should be transparently articulated and justified, so that the impact of the choice of prior can be fully understood. When there is ambiguity about an appropriate prior, such as how fat the tails should be, an analyst should examine how sensitive the UQ results are to the choice of prior.

+

Priors can also be classified in terms of the information encoded by them, demonstrated in Fig. A.5. Non-informative priors (illustrated in Fig. A.5 (a)) allegedly correspond to (and are frequently justified by) a position of ignorance. A classic example is the use of a uniform distribution. A uniform prior can, however, be problematic, as it can lead to improper inferences by giving extremely large values the same prior probability as values which may seem more likely [196, 197], and therefore does not really reflect a state of complete ignorance. In the extreme case of a uniform prior over the entire real line, every particular region has effectively a prior weight of zero, even though not all regions are a priori unlikely [197]. Moreover, a uniform prior which excludes possible parameter values is not actually noninformative, as it assigns zero probability to those values while jumping to a nonzero probability as soon as the boundary is crossed. While a uniform prior can be problematic for the task of uncertainty quantification, it may be useful for an initial sensitivity analysis to identify the boundary of any regions where the model is sensitive to the parameter.

+
+Figure A1.5 + +
+

Fig. A.5 Impact of priors on posterior inferences. These plots show the results of inference for a linear regression model with 15 data points. The true value of the parameter is equal to -3. All priors have mean 0. In panel (a), a non-informative prior allows the tails of the posterior to extend freely, which may result in unreasonably large parameter values. In panel (b), a weakly informative prior constrains the tails more, but allows them to extend without too much restriction. In panel (c), an informative prior strongly constrains the tails of the posterior and biases the inference closer towards the prior mean (the posterior mean is -0.89 in this case, and closer to -3 in the other two cases).#

+
+
+

Informative priors strongly bound the range of probable values (illustrated in Fig. A.5 (c)). One example is a Gaussian distribution with a relatively small standard deviation, so that large values are assigned a close to null prior probability. Another example is the jump from zero to non-zero probability occurring at the truncation point of a truncated Gaussian, which could be justified based on information that the parameter cannot take on values beyond this point. Without this type of justification, however, priors may be too informative, failing to allow the information contained in the available data to update them.

+

Finally, weakly informative priors (illustrated in Fig. A.5 (b)) fall in between [196]. They regularize better than non-informative priors, but allow for more inference flexibility than fully informative priors. An example might be a Gaussian distribution with a moderate standard deviation, which still assigns negligible probability to values far away from the mean, but is less constrained than a narrow Gaussian over a reasonably large range. A key note is that it is not necessarily better to be more informative if this cannot be justified by the available information.
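To make the distinction concrete, the sketch below computes the posterior for a single regression slope under a conjugate normal model with known noise variance, comparing a wide, a moderate, and a narrow prior; the synthetic data and the prior standard deviations are illustrative and are not the settings used to produce Fig. A.5.

import numpy as np

rng = np.random.default_rng(0)
n, true_slope, sigma = 15, -3.0, 1.0
x = rng.uniform(-1, 1, n)
y = true_slope * x + rng.normal(scale=sigma, size=n)

def posterior_slope(prior_sd):
    # conjugate Normal(0, prior_sd^2) prior on the slope, known noise sd sigma
    precision = (x @ x) / sigma**2 + 1.0 / prior_sd**2
    mean = ((x @ y) / sigma**2) / precision
    return mean, np.sqrt(1.0 / precision)

for label, sd in [("non-informative", 100.0), ("weakly informative", 5.0), ("informative", 0.5)]:
    post_mean, post_sd = posterior_slope(sd)
    print(f"{label} prior (sd={sd}): posterior mean {post_mean:.2f}, sd {post_sd:.2f}")

The narrower the prior, the more the posterior mean is pulled toward the prior mean and away from the data, mirroring the qualitative behavior described for Fig. A.5.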

+
+
+

A.7. The Critical Final Step: Predictive Checks#

+

Every UQ workflow requires a number of choices, potentially including the prior distributions, the likelihood specification, and the numerical model(s) used. Checking the appropriateness of these choices is an essential step for sound inferences, as misspecification can produce biased results [179]. Model checking in this fashion is part of an iterative UQ process, as the results can reveal needed adjustments to the statistical model or the need to select a different numerical model [198, 199, 200].

+

A classic example is the need to check the structure of residuals for correlations. Many standard statistical models, such as linear regression, assume that the residuals are independent and identically distributed from the error distribution. The presence of correlations, including temporal autocorrelations and spatial correlations, indicates a structural mismatch between the likelihood and the data. In these cases, the likelihood should be adjusted to account for these correlations.

+

Checking residuals in this fashion is one example of a predictive check (or a posterior predictive check in the Bayesian setting). One way to view UQ is as a means to recover data-generating processes (associated with each parameter vector) consistent with the observations. Predictive checks compare the inferred data-generating process to the observations to determine whether the model is capable of appropriately capturing uncertainty. After conducting the UQ analysis, alternatively realized datasets are simulated from sampled parameters. These alternative datasets, or their summary statistics, can be tested against the observations to determine adequacy of the fit. Predictive checks are therefore a way of probing various model components to identify shortcomings that might result in biased inferences or poor projections, depending on the goal of the analysis.

+

One example of a graphical predictive check for time series models is hindcasting, where predictive intervals are constructed from the alternative datasets and plotted along with the data. Hindcasts demonstrate how well the model is capable of capturing the broader dynamics of the data, as well as whether the parameter distributions produce appropriate levels of output uncertainty. A related quantitative check is the surprise index, which calculates the percentage of data points located within a fixed predictive interval. For example, the 90% predictive interval should contain approximately 90% of the data. If the interval contains substantially more than this, the predictive uncertainty is too wide and the model is underconfident; if it contains substantially less, the model is overconfident. Either could be the result of priors that are not appropriately informative, or of a likelihood that does not appropriately account for correlations between data points. It could also be the result of a numerical model that is not sufficiently sensitive to the parameters that are treated as uncertain.
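A schematic surprise-index calculation is sketched below; the data-generating model and the synthetic parameter samples are illustrative stand-ins for the observations and posterior samples of a real analysis.

import numpy as np

rng = np.random.default_rng(0)
t = np.arange(100)
observations = 0.05 * t + rng.normal(scale=1.0, size=t.size)

# stand-ins for posterior samples of (slope, noise sd) produced by a UQ analysis
slopes = rng.normal(loc=0.05, scale=0.005, size=1000)
noise_sds = np.abs(rng.normal(loc=1.0, scale=0.1, size=1000))

# simulate replicated datasets from the sampled parameters
replicates = slopes[:, None] * t[None, :] + rng.normal(size=(1000, t.size)) * noise_sds[:, None]

# 90% predictive interval at each time step, and the fraction of observations it contains
lower, upper = np.percentile(replicates, [5, 95], axis=0)
coverage = np.mean((observations >= lower) & (observations <= upper))
print(f"surprise index: {coverage:.0%} of observations fall inside the 90% interval")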

+
+
+

A.8. Key Take-Home Points#

+

When appropriate, UQ is an important component of the exploratory modeling workflow. While a number of parameter sets could be consistent with observations, they may result in divergent model outputs when exposed to different future conditions. This can reveal risks which are not visible when selecting a single “best fit” parameterization. Quantifying uncertainties also allows us to assess the degree of support for hypotheses, which is an essential part of the scientific process.

+

Due to the scale and complexity of the experiments taking place in IM3, UQ has not been extensively used. The tradeoff between the available number of function evaluations and the number of uncertain parameters illustrated in Fig. A.1 is particularly challenging due to the increasing complexity of state-of-the-art models and the movement towards coupled, multisector models. This tradeoff can be addressed somewhat through the use of emulators and parallelizable methods. In particular, when attempting to navigate this tradeoff by limiting the number of uncertain parameters, it is important to carefully iterate with sensitivity analyses to ensure that critical parameters are identified.

+

Specifying prior distributions and likelihoods is an ongoing challenge. Prior distributions, in particular, should be treated as deeply uncertain when appropriate. One key advantage of the methods described in this chapter is that they have the potential for increased transparency. When it is not possible to conduct a sensitivity analysis on a number of critical priors due to limited computational budgets, fully specifying and providing a justification for the utilized distributions allows other researchers to identify key assumptions and build on existing work. The same is true for the specification of likelihoods—while likelihood-free methods avoid the need to specify a likelihood function, they require other assumptions or choices, which should be described and justified as transparently as possible.

+

We conclude this appendix with some key recommendations:

1. UQ analysis does not require full confidence in priors and likelihoods. Rather, UQ should be treated as part of an exploratory modeling workflow, where hypotheses related to model structures, prior distributions, and likelihoods can be tested.

2. For complex multisectoral models, UQ will typically require the use of a reduced set of parameters, either through emulation or by fixing the others to their best-fit values. These parameters should be selected through a careful sensitivity analysis.

3. Avoid the use of supposedly “non-informative” priors, such as uniform priors, whenever possible. In the absence of strong information about parameter values, the use of weakly informative priors, such as diffuse normals, is preferable.

4. Be cognizant of the limitations of the conclusions that can be drawn using each method. The bootstrap, for example, may result in overconfidence if the dataset is limited and not truly representative of the underlying stochastic process.

5. When using MCMC, Markov chains cannot be shown to have converged to the target distribution; rather, evidence can be collected to demonstrate that convergence is likely.

6. Conduct predictive checks based on the assumptions underlying the choices made in the analysis, and iteratively update those choices if the assumptions prove ill-suited for the problem at hand.

+
+
+ + +
+ + + + + + + + +
+ + + + + + +
+
+ + +
+ + +
+
+
+ + + + + +
+
+ + \ No newline at end of file diff --git a/dev/docs/html/A2.1_fishgame.html b/dev/docs/html/A2.1_fishgame.html new file mode 100644 index 0000000..b95ea6d --- /dev/null +++ b/dev/docs/html/A2.1_fishgame.html @@ -0,0 +1,856 @@ + + + + + + + + + + + Fishery Dynamics Tutorial — Addressing Uncertainty in MultiSector Dynamics Research documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ +
+ +
+ + + + + +
+
+ + + + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + + + + + + +
+ +
+

Fishery Dynamics Tutorial#

+
+

Note

+
+
Run the tutorial interactively: Fishery Dynamics Notebook.
+
Please be aware that notebooks can take a couple minutes to launch.
+
To run the notebooks yourself, download the files here and use these requirements.
+
+
+
+

Tutorial: Sensitivity Analysis (SA) to discover factors shaping consequential dynamics#

+

This notebook demonstrates the application of sensitivity analysis to +discover factors that shape the behavior modes of a socio-ecological +system with dynamic human action.

+

The system of differential equations below represent a system of prey +(defined in the equation below as x) and predator (defined as y) fish, +with a human actor harvesting the prey fish. You can read more about +this system at Hadjimichael et +al. (2020).

+_images/eqn2.png +

The table below defines the parameters in the system and also denotes +the baseline and ranges associated with each uncertain parameter.

+_images/table1.png +

The system is simple but very rich in the dynamic behaviors it exhibits. This complexity is accompanied by the presence of several equilibria that come in and out of existence with different parameter values. The equilibria also change in their stability according to different parameter values, giving rise to different behavior modes as shown by the diverse predator and prey abundance trajectories in the figure below.

+_images/Figure_1.png +

In the unharvested system (without the human actor) the stability of +several of these equilibria can be derived analytically. The task +becomes significantly more difficult when the adaptive human actor is +introduced, deciding to harvest the system at different rates according +to their objectives and preferences.

+

Sensitivity analysis methods can help us identify the factors that most +control these dynamics by exploring the space of parameter values and +seeing how system outputs change as a result.

+

Through previously conducted optimization, there already exists a set of +potential harvesting strategies that were identified in pursuit of five +objectives:

+
  • Maximize Harvesting Discounted Profits (Net Present Value)

  • Minimize Prey Population Deficit

  • Minimize Longest Duration of Consecutive Low Harvest

  • Maximize Worst Harvest Instance

  • Minimize Harvest Variance
+

The identified harvesting strategies also meet the necessary constraint +of not causing inadvertent predator collapse.

+

We will be examining the effects of parametric uncertainty on these +identified strategies, particularly focusing on two strategies: one +selected to maximize harvesting profits and one identified through +previous analysis to perform ‘well enough’ for all objectives across a +wide range of states of the world (referred to as the ‘robust’ +harvesting policy).

+
+

Let’s get started!#

+

In this tutorial, we will be loading in data that has been produced in +Hadjimichael et al. (2020). Before we start our analysis, we’ll load the +relevant Python libraries. NOTE: To step through the notebook, +execute each gray (code) box by typing “Shift+Enter”.

+
#Import necessary libraries
+
+import msdbook
+import numpy as np
+import matplotlib.pyplot as plt
+from SALib.sample import saltelli
+from SALib.analyze import sobol
+from matplotlib import patheffects as pe
+
+# load example data
+msdbook.install_package_data()
+
+%matplotlib inline
+%config InlineBackend.print_figure_kwargs = {'bbox_inches':None}
+
+
+
Downloading example data for msdbook version 0.1.5...
+Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/uncertain_params_bounds.txt
+Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/sa_metric_s1.npy
+Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/sa_vary_delta.npy
+Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/sa_by_mth_s1.npy
+Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/solutions.resultfile
+Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/3704614_heatmap.npy
+Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/LHsamples_original_1000.txt
+Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/3704614_pseudo_r_scores.csv
+Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/param_values.csv
+Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/sa_by_yr_s1.npy
+Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/sa_by_mth_delta.npy
+Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/7000550_pseudo_r_scores.csv
+Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/collapse_days.csv
+Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/hymod_params_256samples.npy
+Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/sa_vary_s1.npy
+Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/7000550_heatmap.npy
+Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/7200799_heatmap.npy
+Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/sa_by_yr_delta.npy
+Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/7200799_pseudo_r_scores.csv
+Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/LeafCatch.csv
+Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/hymod_simulations_256samples.csv
+Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/Robustness.txt
+
+
+
+
+

Step 1: Load identified solutions and explore performance#

+

Here we load in the solution set obtained in Hadjimichael et al. (2020). The solution set contains the decision variables and objectives associated with a variety of harvesting policies. For this tutorial, we focus on comparing two policies: one that maximizes harvesting profits and one that performs robustly across all objectives. Below, we are reading in the decision variables and objectives from an external file that can be found within the msdbook package data.

+
robustness = msdbook.load_robustness_data()
+results = msdbook.load_profit_maximization_data()
+
+robust_solution = np.argmax(robustness[:,-1]) #pick robust solution
+profit_solution = np.argmin(results[:,6]) #pick profitable solution
+objective_performance = -results[:,6:] #Retain objective values
+
+# Get decision variables for each of the policies
+highprofitpolicy = results[profit_solution,0:6]
+mostrobustpolicy = results[robust_solution,0:6]
+
+
+

Next, we plot the identified solutions with regard to their objective performance in a parallel axis plot.

+
+

Tip

+

View the source code used to create this plot here: plot_objective_performance

+
+
ax, ax1 = msdbook.plot_objective_performance(objective_performance, profit_solution, robust_solution)
+
+
+_images/fishery_output_6_0.png +

The solution set from the optimization in Hadjimichael et al. (2020) is presented in a parallel axis plot where each of the five objectives (and one constraint) is represented as an axis. Each solution on the Pareto front is represented as a line, where the color of the line indicates the value of the NPV objective. The preference for objective values is in the upward direction. Therefore, the ideal solution would be a line straight across the top of the plot that satisfies every objective. However, no such line exists because there are tradeoffs when sets of objectives are prioritized over others. When lines cross in between axes, this indicates a tradeoff between objectives (as seen in the first two axes). The solution that performs best in the NPV objective has the highest value on the first axis and is outlined in dark gold. The solution that is most robust across all objectives is outlined in a brighter yellow. A parallel axis plot is an effective visual to characterize high-dimensional tradeoffs in the system and visualize differences in performance across policies.

+
+
+

Step 2: Use SALib to generate a sample for a Sobol sensitivity analysis#

+

In Step 1, we showed how the optimized harvesting policies performed in +the objective space, which utilized the baseline parameters outlined in +the table above. Now, we are interested in understanding how sensitive +our two policies are to alternative states of the world that may be +characterized by different parameter values. To do so, we first need to +define the problem dictionary that allows us to generate these +alternative states of the world.

+
# Set up SALib problem
+problem = {
+  'num_vars': 9,
+  'names': ['a', 'b', 'c', 'd', 'h', 'K', 'm', 'sigmaX', 'sigmaY'],
+  'bounds': [[0.002, 2], [0.005, 1], [0.2, 1], [0.05, 0.2], [0.001, 1],
+             [100, 5000], [0.1, 1.5], [0.001, 0.01], [0.001, 0.01]]
+}
+
+
+

Then we use the following command to generate a Saltelli sample from +these defined ranges:

+
param_values = saltelli.sample(problem, 1024, calc_second_order=False)
+
+
+

Generally, it is a good idea to save the result of the sample since it +is often reused and regenerating it produces a different sample set. For +this reason, we will load one from file that was previously generated.
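For example, a generated sample could be persisted and reloaded with NumPy (the file name below is arbitrary):

# save the Saltelli sample so the identical set can be reused later
np.savetxt('saltelli_param_values.txt', param_values)

# ...and reload it in a later session
param_values = np.loadtxt('saltelli_param_values.txt')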

+
# load previously generated Saltelli sample from our msdbook package data
+param_values = msdbook.load_saltelli_param_values()
+
+
+
+
+

Step 3: Evaluate the system over all generated states of the world#

+

Now we re-evaluate how well the policies do in the new states of the +world. In order to characterize failure of a policy, we identify the +states where the predator population collapses, as an inadvertent +consequence of applying the harvesting strategy under a state of the +world different from the one originally assumed. Due to how long this +step takes to execute within the tutorial, we will read in the solutions +from an external file. However, the block of code below shows how +evaluation can be implemented.

+
# create array to store collapse values under both policies
+collapse_days = np.zeros([len(param_values), 2])
+
+# evaluate performance under every state
+for i in range(len(param_values)):
+
+    additional_inputs = np.append(['Previous_Prey'],
+                                  [param_values[i,0],
+                                   param_values[i,1],
+                                   param_values[i,2],
+                                   param_values[i,3],
+                                   param_values[i,4],
+                                   param_values[i,5],
+                                   param_values[i,6],
+                                   param_values[i,7],
+                                   param_values[i,8]])
+
+    collapse_days[i,0]=fish_game(highprofitpolicy, additional_inputs)[1][0]
+    collapse_days[i,1]=fish_game(mostrobustpolicy, additional_inputs)[1][0]
+
+
+
# load the simulation data from our msdbook package data
+collapse_days = msdbook.load_collapse_data()
+
+
+
+
+

Step 4: Calculate sensitivity indices#

+

Now we use a Sobol sensitivity analysis to calculate first-order and total-order sensitivity indices for each parameter and for each of the two policies (second-order indices are excluded here via calc_second_order=False). These indices help determine which factors explain the most variability in the number of days of predator population collapse.

+
#Perform the Sobol SA for the profit-maximizing solution
+Si_profit = sobol.analyze(problem, collapse_days[:, 0],
+                          calc_second_order=False,
+                          conf_level=0.95,
+                          print_to_console=True)
+
+
+
#Perform the Sobol SA for the robust solution
+Si_robustness = sobol.analyze(problem,
+                              collapse_days[:, 1],
+                              calc_second_order=False,
+                              conf_level=0.95,
+                              print_to_console=True)
+
+
+
              ST   ST_conf
+a       0.226402  0.036146
+b       0.066819  0.013347
+c       0.004395  0.004023
+d       0.024509  0.006993
+h       0.009765  0.005488
+K       0.020625  0.009494
+m       0.897971  0.066470
+sigmaX  0.000136  0.000149
+sigmaY  0.000739  0.001040
+              S1   S1_conf
+a       0.087936  0.044236
+b       0.000554  0.021474
+c      -0.002970  0.004590
+d       0.001206  0.015881
+h       0.004554  0.007998
+K       0.003843  0.012661
+m       0.751301  0.071862
+sigmaX -0.000325  0.001245
+sigmaY -0.001887  0.002768
+
+
+

Looking at the total-order indices (ST), factors \(m\), \(a\), \(b\), \(d\) and \(K\) explain a non-negligible amount of variance and therefore have an effect on the stability of this system. Looking at the first-order indices (S1), we also see that, besides factors \(m\) and \(a\), all other factors are important in this system primarily through their interactions, which make up the difference between their S1 and ST indices. This shows the danger of limiting sensitivity analyses to first-order effects, as factor importance might be significantly misjudged.
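As an optional follow-on (this snippet assumes the problem dictionary and the Si_robustness result computed in the cells above), the gap between each factor’s total-order and first-order index gives a rough measure of its contribution through interactions:

import numpy as np

interactions = np.array(Si_robustness['ST']) - np.array(Si_robustness['S1'])
for name, value in zip(problem['names'], interactions):
    print(f"{name}: interaction contribution ~ {value:.3f}")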

+

These findings are supported by the analytical condition of equilibrium +stability in this system:

+_images/eqn4.png +

In an unharvested system, this condition is both necessary and +sufficient for the equilibrium of the two species coexisting to be +stable.

+

When adaptive human action is introduced however, this condition is +still necessary, but no longer sufficient, as harvesting reduces the +numbers of prey fish and as a result reduces the resources for the +predator fish. Since this harvesting value is not constant, but can +dynamically adapt according to the harvester’s objectives, it cannot be +introduced into this simple equation.

+
+
+

Step 5: Explore relationship between uncertain factors and performance#

+

In the following steps, we will use the results of our sensitivity +analysis to investigate the relationships between parametric +uncertainty, equilibrium stability and the performance of the two +policies.

+

We can use the top three factors identified (\(m\), \(a\), and +\(b\)) to visualize the performance of our policies in this +three-dimensional parametric space.

+

We first define the stability condition, as a function of \(b\) and +\(m\), and calculate the corresponding values of \(a\).

+
def inequality(b, m, h, K):
+    return ((b**m)/(h*K)**(1-m))
+
+# boundary interval that separates successful and failed states of the world
+b = np.linspace(start=0.005, stop=1, num=1000)
+m = np.linspace(start=0.1, stop=1.5, num=1000)
+h = np.linspace(start=0.001, stop=1, num=1000)
+K = np.linspace(start=100, stop=2000, num=1000)
+b, m = np.meshgrid(b, m)
+a = inequality(b, m, h, K)
+a = a.clip(0,2)
+
+
+
+

Tip

+

View the source code used to create this plot here: plot_factor_performance

+
+
# generate plot
+ax1, ax2 = msdbook.plot_factor_performance(param_values, collapse_days, b, m, a)
+
+
+_images/fishery_output_22_0.png +

These figures show the combinations of factors that lead to success or +failure in different states of the world for the profit-maximizing and +robust policies. Each point is a state of the world, characterized by +specific values of the parameters, and ideally, we would like the color +of the point to be blue, to represent that there are a low number of +days with a predator collapse in that world. The gray curve denotes the +highly non-linear nature of the boundary, defined by the stability +condition, that separates successful and failed states of the world. The +figures demonstrate the following key points:

+

First, as asserted above, the policies interact with the system in +different and complex ways. In the presence of human action, the +stability condition is not sufficient in determining whether the policy +will succeed, even though it clearly shapes the system in a fundamental +manner.

+

Secondly, the robust policy manages to avoid collapse in many more of the sampled states of the world, indicated by the number of blue points. The robust policy avoids collapse in 31% of worlds versus 14% for the profit-maximizing policy. This presents a clear tradeoff between profit-maximizing performance and robustness against uncertainty.

+
+
+

Tips to Apply Sobol SA and Scenario Discovery to your Problem#

+

In this tutorial, we demonstrated a Sobol SA to identify the most +important factors driving the behavior of a system (i.e. the number of +the collapse days). In order to apply this methodology to your problem, +you will need to have a set of optimized policies for your system that +you are interested in analyzing. The general workflow is as follows:

+
  1. Choose sampling bounds for your parameters and set up the problem dictionary as in Step 2 above.

  2. Generate samples, or alternative states of the world, using the saltelli.sample function.

  3. Evaluate your policies on the alternative states of the world. For your application, you will also need to develop a rule for determining success or failure of your policy in a new SOW. In this tutorial, success was denoted by a small number of collapse days. Ultimately, the rule will be specific to your application and can include various satisficing criteria.

  4. Calculate the Sobol indices and discover the most important parameters driving success and failure.

  5. Finally, use a similar plotting procedure as in step 5 to identify the combinations of parameter values that lead to success and failure in the system.
+
+
+
+ + +
+ + + + + + +
+ +
+
+
+ +
+ + + + + + +
+
+ + +
+ + +
+
+
+ + + + + +
+
+ + \ No newline at end of file diff --git a/dev/docs/html/A2.2_saltelli.html b/dev/docs/html/A2.2_saltelli.html new file mode 100644 index 0000000..d9338ba --- /dev/null +++ b/dev/docs/html/A2.2_saltelli.html @@ -0,0 +1,766 @@ + + + + + + + + + + + Sobol SA Tutorial — Addressing Uncertainty in MultiSector Dynamics Research documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ +
+ +
+ + + + + +
+
+ + + + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + + + + + + +
+ +
+

Sobol SA Tutorial#

+
+

Note

+
+
Run the tutorial interactively: Sobol SA Tutorial.
+
Please be aware that notebooks can take a couple minutes to launch.
+
To run the notebooks yourself, download the files here and use these requirements.
+
+
+
+

Tutorial: Sensitivity Analysis (SA) using the Saltelli sampling scheme with Sobol SA#

+

In this tutorial, we will set up a workflow to investigate how sensitive +the output of a function is to its inputs. Why might you want to do +this? Imagine that this function represents a complex system, such as +the rainfall-runoff process of a watershed model, and that you, the +researcher, want to investigate how your choice of input parameter +values are affecting the model’s characterization of runoff in the +watershed. Your parameter values are likely uncertain and can take on +any value in a pre-defined range. Using a Sobol SA will allow you to +sample different values of your parameters and calculate how sensitive +your output of interest is to certain parameters. Below, we demonstrate +Sobol SA for a simple function to illustrate the method, but the +workflow can be applied to your own problem of interest!

+

In order to conduct this analysis, we will use the popular Python +Sensitivity Analysis Library +(SALib) to:

+
  1. Generate a problem set as a dictionary for our Ishigami function that has three inputs

  2. Generate 2048 samples for our problem set using the Saltelli [1] [2] sampling scheme

  3. Execute the Ishigami function for each of our samples and gather the outputs

  4. Compute the sensitivity analysis to generate first-order and total-order sensitivity indices using the Sobol [3] method

  5. Interpret the meaning of our results
+
+

Let’s get started!#

+

NOTE: Content from this tutorial is taken directly from the SALib +“Basics” +walkthrough. To step through the notebook, execute each gray (code) box +by typing “Shift+Enter”.

+
#Import relevant libraries
+import numpy as np
+import matplotlib.pyplot as plt
+from mpl_toolkits import mplot3d
+
+from SALib.sample import saltelli
+from SALib.analyze import sobol
+from SALib.test_functions import Ishigami
+
+
+
+
+

Step 1: Generate the problem dictionary#

+

The Ishigami function is of the form:

+
+\[f(x_1,x_2,x_3) = \sin(x_1) + a \sin^2(x_2) + b x_3^4 \sin(x_1)\]
+

The function has three inputs, 𝑥1, 𝑥2, 𝑥3 where 𝑥𝑖 ∈ [−𝜋, 𝜋]. The +constants \(a\) and \(b\) are defined as 7.0 and 0.1 +respectively.

+
#Create a problem dictionary. Here we supply the number of variables, the names of each variable, and the bounds of the variables.
+problem = {
+    'num_vars': 3,
+    'names': ['x1', 'x2', 'x3'],
+    'bounds': [[-3.14159265359, 3.14159265359],
+               [-3.14159265359, 3.14159265359],
+               [-3.14159265359, 3.14159265359]]
+}
+
+
+
+
+

Step 2: Generate samples using the Saltelli sampling scheme#

+

Sobol SA requires the use of the Saltelli sampling scheme. The output of +the saltelli.sample function is a NumPy array that is of shape 2048 +by 3. The sampler generates 𝑁∗(2𝐷+2) samples, where in this example, N +is 256 (the argument we supplied) and D is 3 (the number of model +inputs), yielding 2048 samples. The keyword argument +calc_second_order=False will exclude second-order indices, resulting +in a smaller sample matrix with 𝑁∗(𝐷+2) rows instead. Below, we plot the +resulting Saltelli sample.

+
#Generate parmeter values using the saltelli.sample function
+param_values = saltelli.sample(problem, 256)
+
+print(f"`param_values` shape:  {param_values.shape}")
+
+
+
param_values shape:  (2048, 3)
+
#Plot the 2048 samples of the parameters
+
+fig = plt.figure(figsize = (7, 5))
+ax = plt.axes(projection ="3d")
+ax.scatter3D(param_values[:,0], param_values[:,1], param_values[:,2])
+ax.set_xlabel('X1 Parameter')
+ax.set_ylabel('X2 Parameter')
+ax.set_zlabel('X3 Parameter')
+plt.title("Saltelli Sample of Parameter Values")
+
+plt.show()
+
+
+_images/output_7_0.png +
+
+

Step 3: Execute the Ishigami function over our sample set#

+

SALib provides a nice wrapper to the Ishigami function that allows the user to pass the param_values array we just generated directly into the function.

+
Y = Ishigami.evaluate(param_values)
+
+
+
+
+

Step 4: Compute first-, second-, and total-order sensitivity indices using the Sobol method#

+

The sobol.analyze function will use our problem dictionary and the result of the Ishigami runs (Y) to compute first-, second-, and total-order indices.

+
Si = sobol.analyze(problem, Y)
+
+
+

Si is a Python dict with the keys “S1”, “S2”, “ST”, “S1_conf”, +“S2_conf”, and “ST_conf”. The _conf keys store the corresponding +confidence intervals, typically with a confidence level of 95%. Use the +keyword argument print_to_console=True to print all indices. Or, we +can print the individual values from Si as shown in the next step.

+
+
+

Step 5: Interpret our results#

+

We execute the following code and take a look at our first-order indices (S1) for each of our three inputs. These indices can be interpreted as the fraction of variance in the output that is explained by each input individually.

+
first_order = Si['S1']
+
+print('First-order:')
+print(f"x1: {first_order[0]}, x2: {first_order[1]}, x3: {first_order[2]}")
+
+
+
First-order:
+x1: 0.3184242969763115, x2: 0.4303808201623416, x3: 0.022687722804980225
+
+
+

If we were to rank the importance of the inputs in how much they individually explain the variance in the output, we would rank them from greatest to least importance as follows: 𝑥2, 𝑥1, and then 𝑥3. Since 𝑥3 only explains about 2% of the output variance, it does not meaningfully explain output variability on its own; the variance is instead driven independently by 𝑥2 and 𝑥1. Determining which inputs are most important, or what index value is meaningful, is a common question, but one for which there is no general rule or threshold. This question is problem- and context-dependent, but procedures have been identified to rank-order influential inputs and to identify the least influential factors, which can then be fixed to simplify the model [4] [5] [6].
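As a small optional illustration (it assumes the problem dictionary and the Si results from the cells above), the inputs can be rank-ordered programmatically by their first-order indices:

import numpy as np

order = np.argsort(Si['S1'])[::-1]        # most to least influential by first-order index
for idx in order:
    print(f"{problem['names'][idx]}: S1 = {Si['S1'][idx]:.3f}")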

+

Next, we evaluate the total-order indices, which measure the +contribution to the output variance caused by varying the model input, +including both its first-order effects (the input varying alone) and all +higher-order interactions across the input parameters.

+
total_order = Si['ST']
+
+print('Total-order:')
+print(f"x1: {total_order[0]}, x2: {total_order[1]}, x3: {total_order[2]}")
+
+
+
Total-order:
+x1: 0.5184119098161343, x2: 0.41021260250026054, x3: 0.2299058431439953
+
+
+

The magnitude of the total-order indices is substantially larger than that of the first-order indices, which reveals that higher-order interactions are occurring, i.e. that the interactions across inputs are also explaining some of the total variance in the output. Note that 𝑥3 has a non-negligible total-order index, which indicates that while it is not a consequential parameter when considered in isolation, it becomes consequential and explains roughly 23% of the variance in the output through its interactions with 𝑥1 and 𝑥2.

+

Finally, we can investigate these higher-order interactions by viewing the second-order indices. The second-order indices measure the contribution to the output variance caused by the interaction between any two model inputs. Some computing error can appear in these sensitivity indices, such as negative values. Typically, these computing errors shrink as the number of samples increases.

+
second_order = Si['S2']
+
+print("Second-order:")
+print(f"x1-x2:  {second_order[0,1]}")
+print(f"x1-x3:  {second_order[0,2]}")
+print(f"x2-x3:  {second_order[1,2]}")
+
+
+
Second-order:
+x1-x2:  -0.043237389723234154
+x1-x3:  0.17506452088709862
+x2-x3:  -0.03430682392607577
+
+
+

We can see that there are strong interactions between 𝑥1 and 𝑥3. Note +that in the Ishigami function, these two variables are multiplied in the +last term of the function, which leads to interactive effects. If we +were considering first order indices alone, we would erroneously assume +that 𝑥3 explains no variance in the output, but the second-order and +total order indices reveal that this is not the case. It’s easy to +understand where we might see interactive effects in the case of the +simple Ishigami function. However, it’s important to remember that in +more complex systems, there may be many higher-order interactions that +are not apparent, but could be extremely consequential in explaining the +variance of the output.

+
+
+

Tips to Apply Sobol SA to Your Own Problem#

+

In this tutorial, we demonstrated how to apply an SA analysis to a +simple mathematical test function. In order to apply a Sobol SA to your +own problem, you will follow the same general workflow that we defined +above. You will need to:

+
  1. Choose sampling bounds for your parameters and set up the problem dictionary as in Step 1 above.

  2. Generate samples using the saltelli.sample function. This step is problem-dependent; note that the Sobol method can be computationally intensive depending on the model being analyzed. For example, for a simple rainfall-runoff model such as HYMOD, it has been recommended to run a sample size of at least N = 10,000 (which translates to 60,000 model runs). More complex models will be slower to run and will also require more samples to calculate accurate estimates of Sobol indices. Once you complete this process, pay attention to the confidence bounds on your sensitivity indices to see whether you need to run more samples.

  3. Run the parameter sets through your model. In the example above, the Ishigami function could be evaluated through SALib since it is a built-in function. For your application, you will need to run these parameter sets through the problem externally and save the output. The output file should contain one row of output values for each model run.

  4. Calculate the Sobol indices. Now, the Y will be a numpy array with your external model output and you will need to include the parameter samples as an additional argument.

  5. Finally, we interpret the results. If the confidence intervals of your dominant indices are larger than roughly 10% of the value itself, you may want to consider increasing your sample size as computation permits. You should additionally read the references noted in Step 5 above to understand more about identifying important factors.
+

References

+ +
+
+
+ + +
+ + + + + + +
+ +
+
+
+ +
+ + + + + + +
+
+ + +
+ + +
+
+
+ + + + + +
+
+ + \ No newline at end of file diff --git a/dev/docs/html/A2.3_logistic.html b/dev/docs/html/A2.3_logistic.html new file mode 100644 index 0000000..aa3afe5 --- /dev/null +++ b/dev/docs/html/A2.3_logistic.html @@ -0,0 +1,828 @@ + + + + + + + + + + + Logistic Regression Tutorial — Addressing Uncertainty in MultiSector Dynamics Research documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ +
+ +
+ + + + + +
+
+ + + + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + + + + + + +
+ +
+

Logistic Regression Tutorial#

+
+

Note

+
+
Run the tutorial interactively: Logistic Regression Tutorial.
+
Please be aware that notebooks can take a couple minutes to launch.
+
To run the notebooks yourself, download the files here and use these requirements.
+
+
+
+

Tutorial: Logistic Regression for Factor Mapping#

+

This tutorial replicates a scenario discovery analysis performed in +Hadjimichael et +al. (2020).

+
+

Background#

+

Planners in the Upper Colorado River Basin (UCRB, shown in the figure below) are seeking to understand the vulnerability of water users to uncertainties stemming from climate change, population growth and water policy changes. The UCRB spans 25,682 km2 in western Colorado and is home to approximately 300,000 residents and 1,012 km2 of irrigated land. Several thousand irrigation ditches divert water from the main river and its tributaries for irrigation (shown as small black dots in the figure). Transmountain diversions of approximately 567,400,000 m3 per year are exported for irrigation, industrial and municipal uses in northern and eastern Colorado, serving the major population centers of Denver and Colorado Springs. These diversions are carried through tunnels, shown as large black dots in the figure.

+_images/basin_map.png +

An important planning consideration is the water rights of each user, +defined by seniority across all water uses (irrigation diversions, +transboundary diversions, power plants etc.) in the basin. To assess the +vulnerability of users with varying degrees of water rights seniority, +planners simulate the system across an ensemble of scenarios using the +state of Colorado’s StateMod platform. The model simulates streamflow, +diversions, instream demands, and reservoir operations.

+

Hadjimichael et al. (2020) employ an exploratory analysis by simulating a large ensemble of plausible scenarios using StateMod and then identifying consequential decision-relevant combinations of uncertain factors, termed scenario discovery. Focusing on decision-relevant metrics (metrics that are important to the user), the scenario discovery is applied to the water shortages experienced by each individual user (i.e., not to a single basin-wide or sector-wide metric). For this training example, we’ll be performing scenario discovery for three different water users: two irrigation users and one municipal user.

+
+
+

Let’s get started!#

+

In this tutorial, we will be loading in data that has been produced in +Hadjimichael et al. (2020). Before we start our analysis, we’ll load the +relevant Python libraries, example data, and information for the three +users.

+
#import libraries
+import msdbook
+import numpy as np
+import pandas as pd
+import matplotlib as mpl
+import matplotlib.pyplot as plt
+
+# load example data from Hadjimichael et al. (2020)
+msdbook.install_package_data()
+
+# Select the IDs for the three users that we will perform the analysis for
+all_IDs = ['7000550','7200799','3704614']
+usernames = ['Medium seniority irrigation',
+             'Low seniority irrigation',
+             'Transbasin municipal diversion']
+nStructures = len(all_IDs)
+
+
+
Downloading example data for msdbook version 0.1.5...
+Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/uncertain_params_bounds.txt
+Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/sa_metric_s1.npy
+Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/sa_vary_delta.npy
+Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/sa_by_mth_s1.npy
+Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/solutions.resultfile
+Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/3704614_heatmap.npy
+Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/LHsamples_original_1000.txt
+Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/3704614_pseudo_r_scores.csv
+Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/param_values.csv
+Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/sa_by_yr_s1.npy
+Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/sa_by_mth_delta.npy
+Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/7000550_pseudo_r_scores.csv
+Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/collapse_days.csv
+Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/hymod_params_256samples.npy
+Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/sa_vary_s1.npy
+Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/7000550_heatmap.npy
+Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/7200799_heatmap.npy
+Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/sa_by_yr_delta.npy
+Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/7200799_pseudo_r_scores.csv
+Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/LeafCatch.csv
+Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/hymod_simulations_256samples.csv
+Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/Robustness.txt
+
+
+
+
+

Step 1: Load Latin hypercube sample and set up problem#

+

To examine regional vulnerability, we generate an ensemble of plausible +future states of the world (SOWs) using Latin Hypercube Sampling. For +this tutorial, we’ll load a file containing 1,000 samples across 14 +parameters. The sampled parameters encompass plausible changes to the +future state of the basin, including changes to hydrology, water demands +(irrigation, municipal & industry, transbasin), and institutional and +environmental factors (environmental flows, reservoir storage, operation +of the Shoshone Power Plant). These samples are taken from ranges +identified in param_bounds. Below we load in the 1000 samples, the +range of values that the samples can take for each parameter, and the +parameter names. More information on what each parameter constitutes can +be found in Table 1 of Hadjimichael et al., 2020.

+
#Identify the bounds for each of the 14 parameters
+param_bounds = msdbook.load_basin_param_bounds()
+
+#Load in the parameter samples
+LHsamples = msdbook.load_lhs_basin_sample()
+
+#Create an array of the parameter names
+param_names=['Irrigation demand multiplier','Reservoir loss','Transbasin demand multiplier',
+             'Municipal & industrial multiplier', 'Shoshone','Environmental flows',
+             'Evaporation change','Mean dry flow','Dry flow variance',
+             'Mean wet flow','Wet flow variance','Dry-dry probability',
+             'Wet-wet probability', 'Snowmelt shift']
+
+
+
+
+

Step 2: Define decision-relevant metrics for illustration#

+

Scenario discovery attempts to identify parametric regions that lead to +‘success’ and ‘failure’. For this demonstration we’ll be defining +‘success’ as states of the world where a shortage level doesn’t exceed +its historical frequency.

+
+
+

Step 3: Run the logistic regression#

+

Logistic regression estimates the probability that a future SOW will be +classified as a success or failure given a set of performance criteria. +A logistic regression model is defined by:

+
+\[\ln \left(\frac{p_i}{1-p_i}\right) = X^T_i \beta\]
+

where \(p_i\) is the probability the performance in the +\(i^{th}\) SOW will be classified as a success, \(X_i\) is the +vector of covariates describing the \(i^{th}\) SOW, and +\(\beta\) is the vector of coefficients describing the relationship +between the covariates and the response, which here will be estimated +using maximum likelihood estimation.

+
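Later in this tutorial the fit is performed with the helper function msdbook.fit_logit, but as a minimal, self-contained sketch (with made-up covariates rather than the basin data), a logit model can be fit with statsmodels roughly as follows:

import numpy as np
import pandas as pd
import statsmodels.api as sm

# made-up covariates and a binary success indicator, for illustration only
rng = np.random.default_rng(0)
dta = pd.DataFrame({'x1': rng.random(500), 'x2': rng.random(500)})
dta['Success'] = (dta['x1'] + dta['x2'] + rng.normal(0, 0.25, 500) > 1).astype(int)

# add an intercept column and estimate the coefficients by maximum likelihood
X = sm.add_constant(dta[['x1', 'x2']])
result = sm.Logit(dta['Success'], X).fit()
print(result.summary())
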

A logistic regression model was fit to the ensemble of SOWs using the performance criteria defined in Step 2. Logistic regression modeling was conducted using the statsmodels Python package. The data required for the full analysis are too large to include in this tutorial, but results can be found in the data file loaded below.

+

The results files contain the occurrence of different shortage frequency and magnitude combinations under the experiment, in increments of 10, between 0 and 100. These combinations (100 for each user) are alternative decision-relevant metrics that can be used for scenario discovery.

+
# Set arrays for shortage frequencies and magnitudes
+frequencies = np.arange(10, 110, 10)
+magnitudes = np.arange(10, 110, 10)
+realizations = 10
+
+# Load performance and pseudo r scores for each of the users
+results = [msdbook.load_user_heatmap_array(all_IDs[i]) / 100 for i in range(len(all_IDs))]
+
+
+
+
+

Step 4: Factor ranking#

+

To rank the importance of each uncertain factor, we utilize McFadden's pseudo-R2, a measure that quantifies the improvement in the model when each given predictor is included, as compared to prediction using only the mean of the data set:

+
+\[R^2_{McFadden}=1-\frac{ln \hat{L}(M_{full})}{ln \hat{L}(M_{intercept})}\]
+

Here \(ln \hat{L}(M_{full})\) is the log likelihood of the full +model (including the predictor) and \(ln \hat{L}(M_{intercept})\) is +the log likelihood of the intercept model (which predicts the mean +probability of success across all SOWs).

+

Higher values of McFadden's pseudo-R2 indicate higher factor importance: as the likelihood of the full model approaches one, its log likelihood approaches zero, so the ratio of log likelihoods becomes very small and the pseudo-R2 approaches one.

+
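The pseudo-R2 scores used below are precomputed for this tutorial, but given a fitted statsmodels logit result (like the sketch in Step 3), McFadden's pseudo-R2 can be computed directly from the fitted and intercept-only log likelihoods:

# McFadden's pseudo-R^2 from a fitted statsmodels Logit result:
# result.llf is the log likelihood of the full model and result.llnull
# is the log likelihood of the intercept-only model
mcfadden_r2 = 1 - result.llf / result.llnull

# statsmodels also reports this value directly as result.prsquared
print(mcfadden_r2, result.prsquared)
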
#Load the pseudo-R^2 scores
+scores = [msdbook.load_user_pseudo_scores(all_IDs[i]) for i in range(len(all_IDs))]
+
+# Select indices of frequency and magnitudes that will be used for the visualization
+freq = [1,0,0]
+mag = [7,3,7]
+
+
+
+
+

Step 5: Draw factor maps#

+

The McFadden's pseudo-R2 score files contain preliminary logistic regression results on parameter importance for each of these combinations. Using these pseudo-R2 scores we will identify the two most important factors for each metric, which we'll use to generate the final scenario discovery maps (note: there may be more than two important factors for each user, but here we demonstrate by mapping two).

+
# setup figure
+fig, axes = plt.subplots(3,1, figsize=(6,18), tight_layout=True)
+fig.patch.set_facecolor('white')
+
+for i in range(len(axes.flat)):
+
+    ax = axes.flat[i]
+
+    allSOWsperformance = results[i]
+    all_pseudo_r_scores = scores[i]
+
+    # construct dataframe
+    dta = pd.DataFrame(data=np.repeat(LHsamples, realizations, axis = 0), columns=param_names)
+    dta['Success'] = allSOWsperformance[freq[i],mag[i],:]
+
+    pseudo_r_scores = all_pseudo_r_scores[str(frequencies[freq[i]])+'yrs_'+str(magnitudes[mag[i]])+'prc'].values
+    top_predictors = np.argsort(pseudo_r_scores)[::-1][:2] #Sort scores and pick top 2 predictors
+
+    # define color map for dots representing SOWs in which the policy
+    # succeeds (light blue) and fails (dark red)
+    dot_cmap = mpl.colors.ListedColormap(np.array([[227,26,28],[166,206,227]])/255.0)
+
+    # define color map for probability contours
+    contour_cmap = mpl.cm.get_cmap('RdBu')
+
+    # define probability contours
+    contour_levels = np.arange(0.0, 1.05,0.1)
+
+    # define base values of the predictors
+    SOW_values = np.array([1,1,1,1,0,0,1,1,1,1,1,0,0,0]) # default parameter values for base SOW
+    base = SOW_values[top_predictors]
+    ranges = param_bounds[top_predictors]
+
+    # define grid of x (1st predictor), and y (2nd predictor) dimensions
+    # to plot contour map over
+    xgrid = np.arange(param_bounds[top_predictors[0]][0],
+                      param_bounds[top_predictors[0]][1], np.around((ranges[0][1]-ranges[0][0])/500,decimals=4))
+    ygrid = np.arange(param_bounds[top_predictors[1]][0],
+                      param_bounds[top_predictors[1]][1], np.around((ranges[1][1]-ranges[1][0])/500,decimals=4))
+    all_predictors = [ dta.columns.tolist()[i] for i in top_predictors]
+    dta['Interaction'] = dta[all_predictors[0]]*dta[all_predictors[1]]
+
+    # logistic regression here
+    predictor_list = [all_predictors[i] for i in [0,1]]
+    result = msdbook.fit_logit(dta, predictor_list)
+
+    # plot contour map
+    contourset = msdbook.plot_contour_map(ax, result, dta, contour_cmap,
+                                          dot_cmap, contour_levels, xgrid,
+                                          ygrid, all_predictors[0], all_predictors[1], base)
+
+    ax.set_title(usernames[i])
+
+# set up colorbar
+cbar_ax = fig.add_axes([0.98, 0.15, 0.05, 0.7])
+cbar = fig.colorbar(contourset, cax=cbar_ax)
+cbar_ax.set_ylabel('Probability of Success', fontsize=16)
+cbar_ax.tick_params(axis='y', which='major', labelsize=12)
+
+
+
/srv/conda/envs/notebook/lib/python3.7/site-packages/statsmodels/base/model.py:127: ValueWarning: unknown kwargs ['disp']
+  warnings.warn(msg, ValueWarning)
+
+
+
Optimization terminated successfully.
+         Current function value: 0.378619
+         Iterations 8
+Optimization terminated successfully.
+         Current function value: 0.397285
+         Iterations 8
+Optimization terminated successfully.
+         Current function value: 0.377323
+         Iterations 8
+
+
+_images/notebook_logistic_output_11_1.png +

The figure above demonstrates how different combinations of the +uncertain factors lead to success or failure in different states of the +world, which are denoted by the blue and red dots respectively. The +probability of success and failure are further denoted by the contours +in the figure. Several insights can be drawn from this figure.

+

First, using metrics chosen to be decision-relevant (specific to each +user) causes different factors to be identified as most important by +this scenario-discovery exercise (the x- and y-axes for each of the +subplots). In other words, depending on what the decision makers of this +system want to prioritize they might choose to monitor different +uncertain factors to track performance.

+

Second, in the top panel, the two identified factors appear to also have +an interactive effect on the metric used (shortages of a certain level +and frequency in this example). In terms of scenario discovery, the +Patient Rule Induction Method (PRIM) or Classification And Regression +Trees (CART) would not be able to delineate this non-linear space and +would therefore misclassify parameter combinations as ‘desirable’ when +they were in fact undesirable, and vice versa.

+

Lastly, logistic regression also produces contours of probability of +success, i.e. different factor-value combinations are assigned different +probabilities that a shortage level will be exceeded. This allows the +decision makers to evaluate these insights while considering their risk +aversion.

+
+
+

Tips to Apply Scenario Discovery to Your Own Problem#

+

In this tutorial, we demonstrated how to perform a scenario discovery analysis for three different users in the UCRB. The analysis allowed us to determine which parameters most affect each user and to visualize how different ranges of these parameters lead to success and failure for different users. This framework can be applied to any other problem where it is of interest to characterize success and failure based on uncertain parameter ranges. To apply the same framework to your own problem:

+
  1. Choose sampling bounds for your parameters of interest, which will represent the uncertainties that characterize your system.

  2. Generate samples for these parameters (this can be done using the saltelli.sample function or externally; a minimal Latin Hypercube sketch is shown after this list).

  3. Define what constitutes success and failure in your problem. In this tutorial, success was defined based on not surpassing the historical drought frequency. Choose a metric that is relevant to your problem and to the decision-makers that might be involved. If your model involves an optimization, you can also define metrics based on meeting certain values of these objectives.

  4. Run the parameter sets through your model and calculate success and failure based on your metrics, across different users if applicable. This step will allow you to create the scatter plot part of the final figure.

  5. If it is of interest, the contours on the figure can be created by fitting the logistic regression model in a similar manner as denoted in Steps 3 and 5 of the tutorial.
+
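For item 2, the sketch below shows one way to generate Latin Hypercube samples with SciPy's qmc module; the parameter names, bounds and sample size are hypothetical placeholders, not the ones used in this tutorial:

import numpy as np
from scipy.stats import qmc

# hypothetical bounds for three uncertain parameters (placeholders)
lower_bounds = np.array([0.5, 0.0, 0.8])   # e.g. demand multiplier, env. flow fraction, streamflow multiplier
upper_bounds = np.array([1.5, 1.0, 1.2])

# draw 1,000 Latin Hypercube samples on the unit hypercube and rescale them
sampler = qmc.LatinHypercube(d=3, seed=42)
unit_samples = sampler.random(n=1000)
lhs_samples = qmc.scale(unit_samples, lower_bounds, upper_bounds)

print(lhs_samples.shape)  # (1000, 3)
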
+
+
+ + \ No newline at end of file diff --git a/dev/docs/html/A2.4_hymod.html b/dev/docs/html/A2.4_hymod.html new file mode 100644 index 0000000..ce7d627 --- /dev/null +++ b/dev/docs/html/A2.4_hymod.html @@ -0,0 +1,1538 @@ + + + + + + + + + + + HYMOD Dynamics Tutorial — Addressing Uncertainty in MultiSector Dynamics Research documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

HYMOD Dynamics Tutorial#

+
+

Note

+
+
Run the tutorial interactively: HYMOD Notebook.
+
Please be aware that notebooks can take a couple minutes to launch.
+
To run the notebooks yourself, download the files here and use these requirements.
+
+
+
+

Tutorial: Sensitivity Analysis of the HYMOD Model#

+

The purpose of this tutorial is to demonstrate the global sensitivity analysis concepts and tools established in Section 3.1 of the main text of this eBook. This demonstration will highlight the central role of design of experiments (Section 3.3) when implementing the global sensitivity analysis tools described in Section 3.4.

+

We'll explore these tools and concepts using the HYdrological MODel (HYMOD), a rainfall-runoff model developed and used for river flow forecasting. HYMOD was chosen for demonstration because its purpose is to abstract highly complex and non-linear systems. The methods demonstrated in this tutorial can be applied to numerical models that simulate other complex non-linear systems.

+

This tutorial will first introduce HYMOD and use it to simulate streamflows in a river basin. Next, we'll employ sensitivity analysis concepts described in Section 3 of the main text to examine how values of HYMOD's parameters impact streamflow predictions. We'll then explore how the effects of these parameters may change over time using time-varying sensitivity analysis. Finally, we'll demonstrate concepts presented in Chapter 7 through two ensemble-based methods of uncertainty quantification - Generalized Likelihood Uncertainty Estimation (GLUE) and Pre-Calibration.

+

The tutorial includes the following steps:

+
+

1. Introduction to HYMOD#

+

1.1 - Introduction to a simple hydrologic model (HYMOD)

+

1.2 - Input Data

+

1.3 - Running a basic simulation

+

1.4 - Model outputs

+
+
+

2. Global Sensitivity Analysis#

+

2.1 - Design of Experiments

+

2.2 - Sensitivity analysis for one output

+

2.3 - Sensitivity analysis across multiple outputs

+

2.4 - Time-varying sensitivity analysis

+
+
+
+

1 - Introduction to HYMOD#

+

1.1 Overview

+

HYMOD is a hydrologic model (rainfall-runoff model) that simulates key hydrologic fluxes such as infiltration, streamflow and evapotranspiration. The model was originally developed and used for river flow forecasting, but it has also been used to explore sensitivity analysis (e.g., Herman et al., 2013), uncertainty quantification (e.g., Smith et al., 2008), and optimization (e.g., Ye et al., 2014) concepts.

+

HYMOD accepts two inputs - daily precipitation and daily potential evapotranspiration (PET) - and generates predictions of daily streamflow. HYMOD abstracts the highly non-linear process of runoff routing by dividing the flow into two components: quick flow, representing precipitation that quickly runs off the surface of the watershed into the stream, and slow flow, which moves through the soil and takes much longer to arrive at the stream.

+

To generate streamflow predictions, HYMOD first models vertical +processes within the watershed to determine how much water infiltrates +and evaporates from the soil at a given time step. It then determines +how much water should be partitioned into quick flow and slow flow +processes. Within each process it abstracts residence time (the time it +takes a unit volume of water to move through the watershed and into the +stream) using a series of “reservoirs” each with a calibrated residence +time.

+

HYMOD's representation of hydrologic processes is shown in Figure 1 below and is controlled by the following parameters:

+

\(H_{uz}\): the maximum water storage capacity of the soil (mm)

+

\(B_{exp}\): parameter describing the degree of spatial variability in soil water storage capacity within the basin (storage capacities vary between 0 and \(H_{uz}\))

+

\(Alp\): Fraction of runoff contributing to quick flow

+

\(K_q\): Quick flow residence time of linear infinite reservoir (the +Kq values of all three linear reservoirs are the same)

+

\(K_s\): Slow flow residence time of linear infinite reservoir

+_images/hymod_schematic-DAVE.png +

HYMOD models the fraction of water that is stored in the soil +\((F(XH_{uz}))\) using the following relationship:

+
+\[F(XH_{uz}) = 1 - (1 - \frac{XH_{uz}}{H_{uz}})^{B}\]
+

where \(XH_{uz}\) is the current water storage height of the soil; \(H_{uz}\) is the parameter describing the basin's maximum water storage capacity (mm); and \(B\) is the parameter describing the degree of spatial variability within the basin.

+

The portion of precipitation that exceeds the water storage capacity is +treated as runoff.

+

To route runoff to streamflow, the excess runoff from the vertical processes is split into quick flow and slow flow. The proportion of runoff partitioned into quick flow and slow flow is determined by a parameter \(Alp\), which ranges between 0 and 1. Quick flow is routed through \(N\) identical quick flow tanks \(Q1, Q2... QN\) (shown above as \(N=3\)). The rate at which runoff moves through the quick flow system is described by the residence time of the quick flow tanks, \(Kq\) (day). Slow flow is routed through a parallel slow flow tank, and the rate at which it is routed is described by the slow flow residence time, \(Ks\) (day).

+
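As a rough illustration of these two relationships (a simplified sketch, not the full msdbook.hymod implementation), the storage fraction and the quick/slow split can be written as:

def soil_moisture_fraction(XHuz, Huz, B):
    """Fraction of the basin storing water at storage height XHuz (equation above)."""
    return 1 - (1 - XHuz / Huz) ** B

def split_runoff(excess_runoff, Alp):
    """Partition excess runoff into quick flow and slow flow using Alp."""
    quick_flow = Alp * excess_runoff
    slow_flow = (1 - Alp) * excess_runoff
    return quick_flow, slow_flow

# example: half-full storage with Huz = 100 mm and B = 1.0, and a 50/50 split
print(soil_moisture_fraction(50.0, 100.0, 1.0))  # 0.5
print(split_runoff(10.0, 0.5))                   # (5.0, 5.0)
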

Citation: Wagener, T., Boyle, D. P., Lees, M. J., Wheater, H. S., Gupta, +H. V., & Sorooshian, S. (2001). A framework for development and +application of hydrological models. Hydrology and Earth System Sciences, +5(1), 13-26.

+

1.2 Input data

+

The HYMOD model only requires precipitation and potential evapotranspiration as inputs. For this example, we'll run HYMOD using data from the Leaf River, a humid catchment located north of Collins, Mississippi, that has been widely used to explore HYMOD. The dataset also includes daily observed runoff that we later use to evaluate the performance of each sensitivity analysis sample set.

+

In the following section of code, we’ll load the necessary python +libraries and read in the input file. For this exercise we’ll only use +the first eleven years of data. The first five rows of the input dataset +are printed to show what they look like:

+
import msdbook
+
+import numpy as np
+import pandas as pd
+import seaborn as sns
+
+from sklearn import metrics
+from matplotlib import pyplot as plt
+
+# load example data
+msdbook.install_package_data()
+
+# load the Leaf River HYMOD input file
+leaf_data = msdbook.load_hymod_input_file()
+
+# extract the first eleven years of data
+leaf_data = leaf_data.iloc[0:4015].copy()
+
+print('Leaf River Data structure:')
+
+# There are only three columns in the file including precipitation, potential evapotranspiration and  streamflow
+leaf_data.head()
+
+
+
Downloading example data for msdbook version 0.1.5...
+Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/uncertain_params_bounds.txt
+Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/sa_metric_s1.npy
+Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/sa_vary_delta.npy
+Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/sa_by_mth_s1.npy
+Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/solutions.resultfile
+Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/3704614_heatmap.npy
+Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/LHsamples_original_1000.txt
+Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/3704614_pseudo_r_scores.csv
+Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/param_values.csv
+Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/sa_by_yr_s1.npy
+Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/sa_by_mth_delta.npy
+Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/7000550_pseudo_r_scores.csv
+Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/collapse_days.csv
+Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/hymod_params_256samples.npy
+Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/sa_vary_s1.npy
+Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/7000550_heatmap.npy
+Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/7200799_heatmap.npy
+Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/sa_by_yr_delta.npy
+Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/7200799_pseudo_r_scores.csv
+Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/LeafCatch.csv
+Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/hymod_simulations_256samples.csv
+Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/Robustness.txt
+Leaf River Data structure:
+
+
+
   Precip  Pot_ET  Strmflw
0     0.0    4.60     0.29
1     0.0    4.31     0.24
2     0.0    4.33     0.21
3     0.0    4.78     0.19
4     0.0    2.91     0.18

To visualize catchment hydrology, streamflow and precipitation data are usually plotted together as a combined hydrograph (streamflow) and hyetograph (rainfall, from the Greek hyetos, "rain"). Streamflow is plotted as a time series, while rainfall is shown as an inverted bar plot along the top of the graph. Streamflow labels are shown on the left y-axis, while rainfall labels are shown on the right y-axis.

+
# make an axis for the hydrograph
+fig, strmflw_ax = plt.subplots(figsize=[12,6])
+strmflw_ax.set_ylim([0, 50])
+
+#make a second y-axis for the hyetograph
+precip_ax = strmflw_ax.twinx()
+precip_ax.set_ylim([0, 200])
+precip_ax.invert_yaxis()
+
+precip = leaf_data['Precip']
+strmflw_ax.plot(range(0, len(leaf_data['Precip'])), leaf_data['Strmflw'], color='lightcoral')
+strmflw_ax.set_ylabel('Streamflow (mm/day)')
+
+precip_ax.bar(range(0, len(leaf_data['Precip'])), leaf_data['Precip'], width=2)
+precip_ax.set_ylabel('Rainfall (mm/day)')
+precip_ax.legend(['Precip'], loc='center right')
+strmflw_ax.legend(['Streamflow'],bbox_to_anchor=(1, 0.48))
+
+
+
<matplotlib.legend.Legend at 0x7f53b95c6850>
+
+
+_images/hymod1.png +

1.3 Running a Baseline Model Simulation

+

We’ll start our experiment by running HYMOD using its default +parameters.

+
# assign input parameters to generate a baseline simulated streamflow
+Nq = 3  # Number of quickflow routing tanks
+Kq = 0.5 # Quickflow routing tanks' rate parameter
+Ks =  0.001 # Slowflow routing tank's rate parameter
+Alp = 0.5 # Quick/slow split parameter
+Huz = 100 # Maximum height of soil moisture accounting tank
+B = 1.0 # Scaled distribution function shape parameter
+
+# Note that the number of years is 11. One year of model warm-up and ten years are used for actual simulation
+model = msdbook.hymod(Nq, Kq, Ks, Alp, Huz, B, leaf_data, ndays=4015)
+
+
+

1.4 Model Outputs

+

Model outputs include actual evapotranspiration, quick and slow streamflow, and combined runoff. In this tutorial we focus on the total daily runoff, QQ (\(m^3/s\)). We can use the following script to plot simulated streamflow against observed streamflow.

+
+

Tip

+

View the source code used to create this plot here: plot_observed_vs_simulated_streamflow

+
+
ax = msdbook.plot_observed_vs_simulated_streamflow(df=leaf_data, hymod_dict=model)
+
+
+_images/hymod2.png +

So how does our model perform? We can investigate model performance +across several metrics:

+

1: Mean Absolute Error (MAE); MAE conveys how the model performs on +average across the 10 year simulation period, with smaller values +indicating better performance. The absolute value is taken so that +positive and negative errors do not cancel each other out.

+
+\[MAE = \frac{1}{N}\sum_{t=1}^{N}\left\lvert Q_{sim,t}-Q_{obs,t}\right\rvert\]
+

2: Root Mean Square Error (RMSE); RMSE is the square root of the mean of the squared errors across the 10 year simulation period. RMSE is sensitive to large errors between the historical record and the simulated flows, and thus is useful for highlighting the model's ability to capture extreme flood events.

+
+\[RMSE = \sqrt{\frac{1}{N}\sum_{t=1}^{N}(Q_{sim,t}-Q_{obs,t})^2}\]
+

3: Log-Root Mean Square Error (Log(RMSE)) LOG(RMSE) focuses on model +performance during low-flow events.

+
+\[LOG(RMSE) = log(RMSE)\]
+
mae = np.mean(abs(leaf_data['Strmflw'] - model['Q']))
+mse = metrics.mean_squared_error(model['Q'], leaf_data['Strmflw'])
+rmse = mse**(1/2)
+log_rmse = np.log10(rmse)
+
+print('MAE: ' + str(mae) + '\nRMSE: ' + str(rmse) + '\nLOG(RMSE): ' + str(log_rmse))
+
+
+
MAE: 1.0787471470460999
+RMSE: 4.375695937555197
+LOG(RMSE): 2.0918164206151544
+
+
+

The error metrics show that HYMOD performs reasonably well: the MAE is around 1 \(m^3/s\), the RMSE is on the order of 10% of the largest observed streamflow, and the LOG(RMSE) is fairly low.

+
+
+

2- Global Sensitivity Analysis#

+

2.1 Experimental Design and Setup

+

Now we’ll examine how sensitive streamflow simulations generated by +HYMOD are to the model’s input parameters. We’ll perform global +sensitivity analysis (see Section 3.1 of the main text) using the SALib +Python library.

+
from SALib.sample import saltelli
+from SALib.analyze import sobol
+from SALib.analyze import delta
+
+
+

A first and critical step when conducting sensitivity analysis is +determining the experimental design (see Design of Experiments, Section +3.4 of the main text). Our experimental design involves defining the +uncertainties that we’ll be examining, the output of interest, the +ranges of each uncertainty that will be explored and the strategy for +sampling the uncertainty space.

+

For this experiment we'll explore the five parameters highlighted in Figure 1. We'll draw their ranges from the existing literature on the model (e.g., Herman et al., 2013). We'll use Sobol sampling, a quasi-random sampling approach based on low-discrepancy sequences, to sample the uncertainty space (Section 3.3.4).

+

In this demonstration we’ll utilize Sobol Sensitivity Analysis, a +variance based method (Section 3.4.5).

+

To explore HYMOD's behavior, we'll examine the sensitivity of four model outputs to the input parameters: 1) predicted flow, 2) Mean Absolute Error (compared with the calibration data set), 3) Root Mean Square Error and 4) Log Root Mean Square Error.

+

This analysis will employ SALib, the open-source Python sensitivity analysis library also used in the other sensitivity analysis tutorials in this eBook.

+

To start our analysis, we'll create a dictionary that describes our model uncertainties and their ranges; this dictionary is named "problem_hymod" (SALib refers to these dictionaries as "problems").

+
problem_hymod = {
+    'num_vars': 5,
+    'names': ['Kq', 'Ks', 'Alp', 'Huz', 'B'],
+    'bounds': [[0.1, 1],  # Kq
+               [0, 0.1],  # Ks
+               [0, 1],    # Alp
+               [0.1, 500],  # Huz
+               [0, 1.9]]  # B
+}
+
+
+

After defining our uncertainties and ranges, we'll use SALib to sample the uncertainty space and run the model for each of the sample sets. We will load a sample that has already been created (param_values_hymod) for demonstration purposes. For HYMOD, the literature recommends running at least N = 10,000 samples; however, to keep this demonstration easy to run, we use only 256 Sobol samples of the uncertainties. To generate accurate approximations of second order sensitivity indices, SALib generates N*(2k+2) sets of samples, where N=256 and k=5 (the number of uncertainties). For the math behind why this is needed, see Saltelli, A. (2002). Making best use of model evaluations to compute sensitivity indices. Computer Physics Communications 145, 280–297. https://doi.org/10.1016/S0010-4655(02)00280-1.

+

The actual model simulation takes an extended period, so we also load +the simulation data from a previous run. The following demonstrates how +to conduct this analysis:

+
# generate 256 samples.
+param_values_hymod = saltelli.sample(problem_hymod, 256)
+
+# dictionary to store outputs in
+d_outputs = {}
+
+# run simulation for each parameter sample
+for i in range(0, len(param_values_hymod)):
+
+    # run model for each sensitivity analysis parameter sets
+    hymod_output = msdbook.hymod(Nq,
+                                 param_values_hymod[i, 0],
+                                 param_values_hymod[i, 1],
+                                 param_values_hymod[i, 2],
+                                 param_values_hymod[i, 3],
+                                 param_values_hymod[i, 4],
+                                 leaf_data,
+                                 ndays=4015)
+
+    # store the simulated total flow discharge
+    d_outputs[f"Q{i}"] = hymod_output["Q"]
+
+
+Q_df_bw = pd.DataFrame(d_outputs)
+
+
+
# load previously generated parameter values
+param_values_hymod = msdbook.load_hymod_params()
+
+# number of samples
+n_samples = len(param_values_hymod)
+
+# load previously generated hymod simulated outputs
+Q_df_bw = msdbook.load_hymod_simulation()
+
+# column names of each sample simulation number
+sample_column_names = [i for i in Q_df_bw.columns if i[0] == 'Q']
+
+
+
+

Running HYMOD - Model Warm-up#

+

A hydrological model such as HYMOD usually includes ordinary +differential equations that are sensitive to their initial condition. +They also have components in their underlying formulation that have long +memory such that prior time steps can affect their current simulations. +For example, soil moisture or groundwater can hold water for a long time +and therefore they are often considered to exhibit a long memory. This +can affect the partitioning of water to runoff and infiltration, while +also controlling the generation of base flow. Therefore, it is important +to have a reasonable initial value for them. To achieve this, +hydrologists usually extend their simulation period and after the +simulations, they remove that extended time period that has unreasonable +groundwater or surface water values. This time period is called the +warm-up time period.

+

Here we extended our simulation for one year (from 10 years to 11 years) +and we removed the first year of simulation, therefore our warm-up +period is one year.

+
# exclude the first year of simulation from the simulations and reset the index
+Q_df = Q_df_bw.iloc[365:4015].copy().reset_index(drop=True)
+
+# exclude the first year of the input data and reset the index
+leaf_data = leaf_data.iloc[365:4015].copy().reset_index(drop=True)
+
+
+

Now that HYMOD has been warmed up, we’ll examine how HYMOD’s streamflow +outputs vary under different sample sets, and compare them with the +observed streamflow.

+
# add date columns to our simulation data frame; for this data our start date is 1/1/2000
+date_ts = pd.date_range(start='1/1/2000', periods=3650, freq='D')
+Q_df['date'] = date_ts
+Q_df['year'] = date_ts.year
+Q_df['month'] = date_ts.month
+Q_df['day'] = date_ts.day
+
+# aggregate the simulated observed streamflow to monthly mean
+df_sim_mth_mean = Q_df.groupby(['year', 'month'])[sample_column_names].mean()
+
+# do the same for the observed data
+date_ts = pd.date_range(start='1/1/2000', periods=len(leaf_data), freq='D')
+leaf_data['date'] = date_ts
+leaf_data['year'] = date_ts.year
+leaf_data['month'] = date_ts.month
+leaf_data['day'] = date_ts.day
+
+# aggregate the daily observed streamflow to monthly mean
+df_obs_mth_mean = leaf_data.groupby(['year', 'month']).mean()
+
+
+
+

Tip

+

View the source code used to create this plot here: plot_observed_vs_sensitivity_streamflow

+
+
ax = msdbook.plot_observed_vs_sensitivity_streamflow(df_obs=df_obs_mth_mean,
+                                                     df_sim=df_sim_mth_mean)
+
+
+_images/hymod3.png +

2.2 Sensitivity of streamflows to model parameters

+

Now we'll examine how each of HYMOD's parameters impacts the variance of simulated streamflows. Using SALib we'll calculate the first order and total order sensitivity indices of each model parameter. The first order sensitivity index measures the individual impact that a given parameter has on the variance of the simulated streamflows. The total order index measures the impact of a given parameter, along with all interactions that other parameters have with that parameter, on simulated streamflows.

+

We'll start with a matrix, Y, which contains our simulated streamflows for every uncertainty sample. We'll then use the sobol.analyze function from SALib to calculate the sensitivity indices (Si). The arguments for this function are the problem dictionary defined in section 2.1 of this tutorial and the matrix of simulated streamflows, Y.

+
# overall aggregated indices
+Y = Q_df[sample_column_names].mean().to_numpy()
+
+# Perform analysis
+Si = sobol.analyze(problem_hymod, Y)
+
+
+

Now we can examine our results. We'll print the first order and total order indices for each parameter, then visualize the results with bar plots.

+
print('First order indices = ', Si['S1'])
+
+print('Total order indices = ', Si['ST'])
+
+sns.set_style('white')
+fig = plt.figure(figsize=(8,4))
+ax1 = fig.add_subplot(121)
+ax1.bar(np.arange(5), Si['S1'])
+ax1.set_xticklabels(['','Kq', 'Ks', 'Alp', 'Huz', 'B'])
+ax1.set_ylabel('First order Si')
+ax1.set_ylim([0,1])
+
+ax2 = fig.add_subplot(122)
+ax2.bar(np.arange(5), Si['ST'])
+ax2.set_xticklabels(['','Kq', 'Ks', 'Alp', 'Huz', 'B'])
+ax2.set_ylabel('Total order Si')
+ax2.set_ylim([0,1])
+
+
+
First order indices =  [9.55550001e-05 7.49249463e-04 5.62386413e-04 7.03327551e-01
+ 2.53701895e-01]
+Total order indices =  [1.76174200e-06 1.63288175e-03 3.41378460e-04 6.88983864e-01
+ 2.53922146e-01]
+
+
+
/srv/conda/envs/notebook/lib/python3.7/site-packages/ipykernel_launcher.py:9: UserWarning: FixedFormatter should only be used together with FixedLocator
+  if __name__ == '__main__':
+/srv/conda/envs/notebook/lib/python3.7/site-packages/ipykernel_launcher.py:15: UserWarning: FixedFormatter should only be used together with FixedLocator
+  from ipykernel import kernelapp as app
+
+
+
(0.0, 1.0)
+
+
+_images/hymod4.png +

Our findings indicate that in this instance, the streamflow estimate from HYMOD is highly sensitive to the soil moisture parameters Huz and B and hardly affected by the routing parameters. Notably, there is very little interaction between parameters, causing the total order indices to be nearly identical to the first order indices.

+
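A quick numerical check of this (using the Si dictionary computed above) is to look at the difference between the total order and first order indices, which approximates the contribution of interactions for each parameter:

# interaction contribution for each parameter: total order minus first order
interactions = Si['ST'] - Si['S1']

for name, value in zip(problem_hymod['names'], interactions):
    print(f'{name}: {value:.4f}')
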

2.3 How do different performance metrics affect the results of our +sensitivity analysis?

+

Streamflow has many different properties. In this section, we discuss how the selection of metrics can lead to fundamentally different sensitivity analysis results. For example, one can focus only on aggregated streamflow metrics such as the mean (what has been presented so far), or only on extreme events such as droughts or floods.

+

Here we compare three different metrics: 1) Mean Absolute Error (MAE), 2) Root Mean Square Error (RMSE), and 3) Log-Root Mean Square Error (Log(RMSE)).

+

Each of these metrics focuses on a specific attribute of streamflow. For +example, RMSE highlights the impacts of extreme flood events, while +LOG(RMSE) focuses on model performance during low-flow events.

+
# calculate error metrics
+mae = Q_df[sample_column_names].apply(lambda x: abs(x-leaf_data["Strmflw"]), axis=0)
+mse = Q_df[sample_column_names].apply(lambda x: metrics.mean_squared_error(x, leaf_data["Strmflw"]), axis=0)
+rmse = mse**(1/2)
+
+# add error metrics to a dictionary
+d_metrics = {'MAE': mae.mean().values,
+             'RMSE': rmse.values,
+             'LOG[RMSE]': np.log10(rmse.values)}
+
+# convert to a dataframe
+df_metrics_SA = pd.DataFrame(d_metrics)
+
+
+

We can use the following to calculate the SA indices for each metric and +visualize it.

+
df_metric_s1_result = pd.DataFrame(np.zeros((3, 5)), columns=['Kq', 'Ks', 'Alp', 'Huz', 'B'])
+df_metric_sT_result = pd.DataFrame(np.zeros((3, 5)), columns=['Kq', 'Ks', 'Alp', 'Huz', 'B'])
+
+# conduct sensitivity analysis for each metric
+for index, i in enumerate(d_metrics.keys()):
+
+    # get the data as a numpy array for the target metric
+    Y = d_metrics[i]
+
+    # use the metric to conduct SA
+    Si = sobol.analyze(problem_hymod, Y, print_to_console=False)
+
+    # add the sensitivity indices to the output data frame
+    df_metric_s1_result.iloc[index, :] = Si['S1']
+    df_metric_sT_result.iloc[index, :] = Si['ST']
+
+
+
# create seaborn heatmap with required labels
+fig = plt.figure(figsize=(12,4))
+ax1 = fig.add_subplot(121)
+# labels for y-axis
+y_axis_labels = ['Mean Absolute Error', 'RMSE', 'Log(RMSE)']
+
+# plot heatmap
+ax1 = sns.heatmap(df_metric_s1_result, yticklabels=y_axis_labels, annot=True,  cmap='inferno_r', cbar_kws={'label': 'Si'}, cbar=False)
+ax1.figure.axes[-1].yaxis.label.set_size(14)
+ax1.set_title('First Order Sensitivity')
+
+ax2 = fig.add_subplot(122)
+ax2 = sns.heatmap(df_metric_sT_result, yticklabels=y_axis_labels, annot=True,  cmap='inferno_r', cbar_kws={'label': 'Si'})
+ax2.figure.axes[-1].yaxis.label.set_size(14)
+ax2.set_title('Total Order Sensitivity')
+
+
+
Text(0.5, 1.0, 'Total Order Sensitivity')
+
+
+_images/hymod5.png +

The first order sensitivity indices indicate that HYMOD's sensitivity to its parameters differs depending on how its output is measured. Unsurprisingly, the mean absolute error is highly sensitive to the soil moisture accounting parameters Huz and B, just like the overall streamflow predictions above. However, when we examine RMSE and log(RMSE), the routing parameter Alp becomes influential, and the sensitivity to parameter B is reduced. As described above, RMSE and LOG(RMSE) respond to model performance in high-flow and low-flow periods respectively. Our results indicate that for these flow regimes Alp, the parameter that governs the split between quick and slow flow, is an important factor. While Huz still has the largest effect on all three measures, it is much less influential for RMSE and LOG(RMSE) than it is for MAE.

+

The total order sensitivity indices reveal a different, more complex story. While the MAE sensitivity is largely governed by first order effects (like the streamflow predictions above), the RMSE and LOG(RMSE) error metrics show significant interactions. Alp has the highest total order sensitivity for RMSE and is equal to Huz for Log(RMSE). Kq, which has a relatively low first order sensitivity index, shows a strong contribution to variance when interactions are taken into account.

+

Radial convergence plots are a helpful way to visualize the interactions +between parameters. These plots array the model parameters in a circle +and plot the first order, total order and second order Sobol sensitivity +indices for each parameter. The first order sensitivity is shown as the +size of a closed circle, the total order as the size of a larger open +circle and the second order as the thickness of a line connecting two +parameters. Below is an example of a radial convergence plot for the +LOG(RMSE) measure. The plot indicates strong interactions between the +Huz and Alp parameters, as well as Alp and Kq. There is also an +interaction between Alp and Ks.

+
import numpy as np
+import itertools
+import seaborn as sns
+import math
+sns.set_style('whitegrid', {'axes_linewidth': 0, 'axes.edgecolor': 'white'})
+
+def is_significant(value, confidence_interval, threshold="conf"):
+    if threshold == "conf":
+        return value - abs(confidence_interval) > 0
+    else:
+        return value - abs(float(threshold)) > 0
+
+def grouped_radial(SAresults, parameters, radSc=2.0, scaling=1, widthSc=0.5, STthick=1, varNameMult=1.3, colors=None, groups=None, gpNameMult=1.5, threshold="conf"):
+    # Derived from https://github.com/calvinwhealton/SensitivityAnalysisPlots
+    fig, ax = plt.subplots(1, 1)
+    color_map = {}
+
+    # initialize parameters and colors
+    if groups is None:
+
+        if colors is None:
+            colors = ["k"]
+
+        for i, parameter in enumerate(parameters):
+            color_map[parameter] = colors[i % len(colors)]
+    else:
+        if colors is None:
+            colors = sns.color_palette("deep", max(3, len(groups)))
+
+        for i, key in enumerate(groups.keys()):
+            #parameters.extend(groups[key])
+
+            for parameter in groups[key]:
+                color_map[parameter] = colors[i % len(colors)]
+
+    n = len(parameters)
+    angles = radSc*math.pi*np.arange(0, n)/n
+    x = radSc*np.cos(angles)
+    y = radSc*np.sin(angles)
+
+    # plot second-order indices
+    for i, j in itertools.combinations(range(n), 2):
+        #key1 = parameters[i]
+        #key2 = parameters[j]
+
+        if is_significant(SAresults["S2"][i][j], SAresults["S2_conf"][i][j], threshold):
+            angle = math.atan((y[j]-y[i])/(x[j]-x[i]))
+
+            if y[j]-y[i] < 0:
+                angle += math.pi
+
+            line_hw = scaling*(max(0, SAresults["S2"][i][j])**widthSc)/2
+
+            coords = np.empty((4, 2))
+            coords[0, 0] = x[i] - line_hw*math.sin(angle)
+            coords[1, 0] = x[i] + line_hw*math.sin(angle)
+            coords[2, 0] = x[j] + line_hw*math.sin(angle)
+            coords[3, 0] = x[j] - line_hw*math.sin(angle)
+            coords[0, 1] = y[i] + line_hw*math.cos(angle)
+            coords[1, 1] = y[i] - line_hw*math.cos(angle)
+            coords[2, 1] = y[j] - line_hw*math.cos(angle)
+            coords[3, 1] = y[j] + line_hw*math.cos(angle)
+
+            ax.add_artist(plt.Polygon(coords, color="0.75"))
+
+    # plot total order indices
+    for i, key in enumerate(parameters):
+        if is_significant(SAresults["ST"][i], SAresults["ST_conf"][i], threshold):
+            ax.add_artist(plt.Circle((x[i], y[i]), scaling*(SAresults["ST"][i]**widthSc)/2, color='w'))
+            ax.add_artist(plt.Circle((x[i], y[i]), scaling*(SAresults["ST"][i]**widthSc)/2, lw=STthick, color='0.4', fill=False))
+
+    # plot first-order indices
+    for i, key in enumerate(parameters):
+        if is_significant(SAresults["S1"][i], SAresults["S1_conf"][i], threshold):
+            ax.add_artist(plt.Circle((x[i], y[i]), scaling*(SAresults["S1"][i]**widthSc)/2, color='0.4'))
+
+    # add labels
+    for i, key in enumerate(parameters):
+        ax.text(varNameMult*x[i], varNameMult*y[i], key, ha='center', va='center',
+                rotation=angles[i]*360/(2*math.pi) - 90,
+                color=color_map[key])
+
+    if groups is not None:
+        for i, group in enumerate(groups.keys()):
+            print(group)
+            group_angle = np.mean([angles[j] for j in range(n) if parameters[j] in groups[group]])
+
+            ax.text(gpNameMult*radSc*math.cos(group_angle), gpNameMult*radSc*math.sin(group_angle), group, ha='center', va='center',
+                rotation=group_angle*360/(2*math.pi) - 90,
+                color=colors[i % len(colors)])
+
+    ax.set_facecolor('white')
+    ax.set_xticks([])
+    ax.set_yticks([])
+    plt.axis('equal')
+    plt.axis([-2*radSc, 2*radSc, -2*radSc, 2*radSc])
+    #plt.show()
+
+
+    return fig
+
+# define groups for parameter uncertainties
+groups={"Soil Moisture" : ["Huz", "B"],
+        "Routing" : ["Alp", "Kq", "Ks"]}
+
+
+fig = grouped_radial(Si, ['Kq', 'Ks', 'Alp', 'Huz', 'B'], groups=groups, threshold=0.025)
+
+
+
Soil Moisture
+Routing
+
+
+_images/hymod6.png +

2.4 Time-Varying Sensitivity Analysis

+

In section 2.3 we saw how performing sensitivity analysis on different measurements of model output can yield different results about the importance of each uncertain input. In this section we'll examine how performing this analysis over time can yield additional insight into the performance of HYMOD. We'll first examine how model sensitivities vary by month, then examine how they change across each year of the simulation.

+

For this demonstration, we’ll focus only on the monthly streamflow +predictions generated by HYMOD.

+
+

2.4.1 Sensitivity analysis indices for each month#

+
# aggregate simulated streamflow data to monthly time series
+df_sim_by_mth_mean = Q_df.groupby('month')[sample_column_names].mean()
+
+# aggregate observed streamflow data to monthly time series
+df_obs_by_mth_mean = leaf_data.groupby('month').mean()
+
+
+

We can use the following to calculate the SA indices for each month and +visualize it. Results are pre-loaded for efficiency.

+
# set up dataframes to store outputs
+df_mth_s1 = pd.DataFrame(np.zeros((12,5)), columns=['Kq', 'Ks', 'Alp', 'Huz', 'B'])
+df_mth_delta = df_mth_s1.copy()
+
+# iterate through each month
+for i in range(0, 12):
+
+    # generate the simulation data
+    Y = df_sim_by_mth_mean.iloc[i, :].to_numpy()
+
+    # run SA
+    Si = delta.analyze(problem_hymod, param_values_hymod, Y, print_to_console=False)
+
+    # add to output dataframes
+    df_mth_s1.iloc[i, :] = np.maximum(Si['S1'], 0)
+    df_mth_delta.iloc[i, :] = np.maximum(Si['delta'], 0)
+
+# convert to arrays
+arr_mth_s1 = df_mth_s1.values
+arr_mth_delta = df_mth_delta.values
+
+
+
+
+
+

First-order Indices#

+

The following can be used to visualize the time-varying first-order +indices. The first order represents the direct impacts of a specific +parameter on model outputs.

+
+

Tip

+

View the source code used to create this plot here: plot_monthly_heatmap

+
+
# load previously ran data
+arr_mth_delta, arr_mth_s1 = msdbook.load_hymod_monthly_simulations()
+
+# plot figure
+ax, ax2 = msdbook.plot_monthly_heatmap(arr_sim=arr_mth_s1.T,
+                                       df_obs=df_obs_by_mth_mean,
+                                       title='First Order - Mean Monthly SA')
+
+
+_images/hymod7.png +

This figure demonstrates the first order sensitivity indices when the +streamflow data are aggregated by month. The purple line represents the +observed monthly discharge. The figure indicates that the first order +indices are highest for B and Huz across all months and lowest for Alp, +Ks, and Kq. Note that in the months with the highest flow, Ks becomes an +influential parameter.

+
+
+

Total-order indices#

+

We can also focus on the total order sensitivity index that includes +first-order SA indices and interactions between parameters

+
# plot figure
+ax, ax2 = msdbook.plot_monthly_heatmap(arr_sim=arr_mth_delta.T,
+                                       df_obs=df_obs_by_mth_mean,
+                                       title='Total Order - Mean monthly SA')
+
+
+_images/hymod8.png +

Notably, the total order sensitivity results are different than the +first order sensitivity results, which indicates that interactions +between the parameters (particularly in regards to routing parameters +\(Kq\), \(Ks\), and \(Alp\)) contribute to changes in HYMOD +output.

+
+

2.4.2 Annual sensitivity analysis indices#

+
# group by year and get mean
+df_sim_by_yr_mean = Q_df.groupby(['year'])[sample_column_names].mean()
+
+# group input data and get mean
+df_obs_by_yr_mean = leaf_data.groupby(['year']).mean()
+
+
+

We can also calculate the sensitivity analysis indices for each +individual year. This will allow us to understand if model control +changes during different years. The following code first aggregates the +outputs to annual time steps, and then calculates the SA indices.

+
# set up dataframes to store outputs
+df_yr_s1 = pd.DataFrame(np.zeros((10, 5)), columns=['Kq', 'Ks', 'Alp', 'Huz', 'B'])
+df_yr_delta = df_yr_s1.copy()
+
+# iterate through each year
+for i in range(0, 10):
+
+    # generate the simulation data
+    Y = df_sim_by_yr_mean.iloc[i, :].to_numpy()
+
+    # run SA
+    Si = delta.analyze(problem_hymod, param_values_hymod, Y, print_to_console=False)
+
+    # add to output dataframes
+    df_yr_s1.iloc[i, :] = np.maximum(Si['S1'], 0)
+    df_yr_delta.iloc[i, :] = np.maximum(Si['delta'], 0)
+
+# convert to arrays
+arr_yr_s1 = df_yr_s1.values
+arr_yr_delta = df_yr_delta.values
+
+
+
+
+
+

First-order indices#

+
+

Tip

+

View the source code used to create this plot here: plot_annual_heatmap

+
+
# load previously ran data
+arr_yr_delta, arr_yr_s1 = msdbook.load_hymod_annual_simulations()
+
+# plot figure
+ax, ax2 = msdbook.plot_annual_heatmap(arr_sim=arr_yr_s1.T,
+                                      df_obs=df_obs_by_yr_mean,
+                                      title='First Order - Mean Annual SA')
+
+
+_images/hymod9.png +

The first order sensitivities at the annual scale are not unlike the first order monthly sensitivities. Once again, sensitivities vary from year to year, and Huz and B are the most consequential parameters.

+
+
+

Total-order indices#

+
# plot figure
+ax, ax2 = msdbook.plot_annual_heatmap(arr_sim=arr_yr_delta.T,
+                                      df_obs=df_obs_by_yr_mean,
+                                      title='Total Order - Mean Annual SA and Observed flow')
+
+
+_images/hymod10.png +

Our results indicate that the sensitivity analysis indices vary across years, and now that interactions are included, the Kq, Ks, and Alp variables also affect the sensitivity of the streamflow output.

+
+

2.4.3 Monthly time-varying sensitivity analysis#

+

Although time-varying sensitivity analysis (TVSA) at average monthly and average annual temporal resolutions is informative, TVSA is susceptible to the aggregation issue that we discussed earlier in section 3-2. To avoid that, we can further discretize our time domain to zoom into individual months. This will provide us with even more information about model behavior and the sensitivity of different parameters in different states of the system. The following block of code demonstrates how to implement the monthly TVSA.

+
# set up dataframes to store outputs
+df_vary_s1 = pd.DataFrame(np.zeros((df_obs_mth_mean.shape[0], 5)),
+                          columns=['Kq', 'Ks', 'Alp', 'Huz', 'B'])
+
+df_vary_delta = df_vary_s1.copy()
+
+# iterate through each month
+for i in range(0, df_obs_mth_mean.shape[0]):
+
+    # generate the simulation data
+    Y = df_sim_mth_mean.iloc[i, :].to_numpy()
+
+    # run SA
+    Si = delta.analyze(problem_hymod, param_values_hymod, Y, print_to_console=False)
+
+    # add to output dataframes
+    df_vary_s1.iloc[i, :] = np.maximum(Si['S1'], 0)
+    df_vary_delta.iloc[i, :] = np.maximum(Si['delta'], 0)
+
+# convert to arrays
+arr_vary_s1 = df_vary_s1.values
+arr_vary_delta = df_vary_delta.values
+
+
+
+
+
+

First-order indices#

+
+

Tip

+

View the source code used to create this plot here: plot_varying_heatmap

+
+
# load in previously ran data
+arr_vary_delta, arr_vary_s1 = msdbook.load_hymod_varying_simulations()
+
+# plot figure
+ax, ax2 = msdbook.plot_varying_heatmap(arr_sim=arr_vary_s1.T,
+                                      df_obs=df_obs_mth_mean,
+                                      title='First Order - Time-Varying SA')
+
+
+_images/hymod11.png +

Compared to the TVSA when streamflow was aggregated, this figure +suggests that Kq is indeed a relevant parameter for influencing +streamflow output when individual months are considered.

+
+
+

Total order - time varying sensitivity analysis#

+
# plot figure
+ax, ax2 = msdbook.plot_varying_heatmap(arr_sim=arr_vary_delta.T,
+                                      df_obs=df_obs_mth_mean,
+                                      title='Total Order - Time-Varying SA')
+
+
+_images/hymod12.png +

As above, the total order sensitivities further indicate the importance +of Kq that is not apparent if aggregation is utilized.

+
+
+
+

Tips to Apply This Methodology to Your Own Problem#

+

In this tutorial, we demonstrated how to use global sensitivity analysis to explore a complex, non-linear model. We showed how measuring sensitivity across multiple measures of model performance and temporal aggregations yields differing results about model sensitivity and behavior. While these results may seem contradictory, they provide useful insight into the behavior of HYMOD. Would we expect the same parameters to control high flow and low flow regimes within the model? Maybe, depending on the system, but also, maybe not. This analysis can provide insight into how the model responds to its input parameters, allowing us to compare the results to our expectations. This may allow us to find problems with our initial assumptions, or call attention to model features that can be improved or expanded upon. Depending on the model and context, it may also yield insight into the workings of the underlying system.

+

To run this tutorial on your own model you will need to:

+
  1. Design your experiment by choosing sampling bounds for your parameters and setting up the problem dictionary as in section 2.1.

  2. Choose the parameters of interest.

  3. Generate samples using the saltelli.sample function (a generic sketch is shown after this list). This step is problem-dependent; note that the Sobol method can be computationally intensive depending on the model being analyzed. More complex models will be slower to run and will also require more samples to calculate accurate estimates of Sobol indices. Once you complete this process, pay attention to the confidence bounds on your sensitivity indices to see whether you need to run more samples.

  4. Run the parameter sets through your model and record each of the desired model outputs.

  5. Calculate the Sobol indices for each performance criterion. Now, Y will be a numpy array with your external model output and you will need to include the parameter samples as an additional argument.

  6. Follow the procedure in section 2.4 to disaggregate performance across time.
+
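A generic skeleton of steps 1-5 (with a hypothetical two-parameter problem and a stand-in model; replace my_model with your own simulation) might look like:

from SALib.sample import saltelli
from SALib.analyze import sobol
import numpy as np

# steps 1-2: define the problem dictionary for your own parameters (hypothetical bounds)
problem = {'num_vars': 2,
           'names': ['param_a', 'param_b'],
           'bounds': [[0.0, 1.0],
                      [10.0, 20.0]]}

# step 3: generate Sobol samples; this yields N*(2k+2) parameter sets
param_values = saltelli.sample(problem, 256)

# step 4: run your model for every parameter set and record the output of interest
def my_model(a, b):
    return a * np.sin(b)

Y = np.array([my_model(a, b) for a, b in param_values])

# step 5: compute first and total order Sobol indices for this output
Si = sobol.analyze(problem, Y)
print(Si['S1'], Si['ST'])
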
+
+ + \ No newline at end of file diff --git a/dev/docs/html/A2.5_discovery.html b/dev/docs/html/A2.5_discovery.html new file mode 100644 index 0000000..40ab6df --- /dev/null +++ b/dev/docs/html/A2.5_discovery.html @@ -0,0 +1,811 @@ + + + + + + + + + + + Time-evolving scenario discovery for infrastructure pathways — Addressing Uncertainty in MultiSector Dynamics Research documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

Time-evolving scenario discovery for infrastructure pathways#

+
+

Note

+
+
Run the tutorial interactively: Scenario Discovery Notebook.
+
Please be aware that notebooks can take a couple minutes to launch.
+
To run the notebooks yourself, download the files here and use these requirements.
+
+
+
+

Time-evolving scenario discovery for infrastructure pathways#

+

The purpose of this tutorial is to explore time-evolving vulnerability for systems that dynamically adapt to changing conditions. Using an example from water supply planning, we'll first examine how the performance of a dynamic infrastructure pathway policy changes over time, then use factor mapping (main text Chapter 4.3) to understand which combinations of uncertainties generate vulnerability for two water utilities. Next, we'll perform factor prioritization (main text Chapter 4.3) to determine which uncertainties have the most influence on water supply performance. Finally, we'll provide an open platform to explore vulnerability across multiple measures of performance and different combinations of uncertainties.

+
+

Background#

+

The Bedford-Greene metropolitan area (Figure 1) is a stylized water resources test case where two urban water utilities seek to develop an infrastructure investment and management strategy to confront growing demands and a changing climate. The utilities have agreed to jointly construct a new water treatment plant on Lake Classon, a large regional resource. Both utilities have also identified a set of individual infrastructure options to construct if necessary.

+
+Figure 1 +
+

Figure 1#

+
+
+

The utilities are formulating a cooperative and adaptive regional management strategy that uses a risk-of-failure (ROF) metric to trigger both short term drought mitigation actions (water use restrictions and treated transfers between utilities) and long-term infrastructure investment decisions (shown in Figure 2a). Both utilities have specified a set of risk triggers and developed a construction order for available infrastructure options.

+

The utilities have run a Monte Carlo simulation to evaluate how these policies respond to a wide array of future States Of the World (SOWs). Each SOW represents a different combination of thirteen uncertain system inputs including demand growth rates, changes to streamflows, and financial variables. In this context, a fully specified SOW is composed of one sample of uncertain model inputs (e.g. one projection of demand growth rate coupled with one future streamflow scenario and one projection of future financial conditions). The water utilities used Latin Hypercube sampling (Chapter 3.3 of the main text) to develop an ensemble of 1,000 plausible future SOWs. The Monte Carlo simulation evaluates each candidate water supply infrastructure investment and management policy across all 1,000 SOWs, as shown in Figure 2b. For more details on the Monte Carlo sampling for this type of analysis, see Trindade et al., (2019).

+

The ROF-based policies respond to each SOW by generating a unique infrastructure pathway - a sequence of infrastructure investment decisions over time. Infrastructure pathways over a set of 1,000 future SOWs are shown in Figure 2c. Infrastructure options are arrayed along the vertical axis and the sequence of infrastructure investments triggered by the policy is plotted as pathways over time. Since the adaptive rule system generates a unique infrastructure sequence for each scenario, Figure 2c summarizes the ensemble of pathways by clustering SOWs according to infrastructure intensity. Dark green lines represent SOWs where the utilities heavily invest in new infrastructure, light green lines represent SOWs with low infrastructure investment and medium shaded lines represent moderate investment. The shading behind each pathway represents the frequency that each infrastructure option was triggered over time across sampled scenarios

+
+Figure 2 +
+

Figure 2#

+
+
+
+
+

Evaluating Robustness over time#

+

The two water utilities are interested in maintaining both supply reliability and financial stability across the broadest set of plausible future SOWs. To measure the performance of the infrastructure pathway policy, they've defined five critical performance criteria:

+
  • Reliability > 99%

  • Restriction Frequency < 20%

  • Peak Financial Cost < 80% of annual revenue (a measure of debt service spending)

  • Worst-case drought management cost < 10% of annual revenue (a measure of unexpected drought costs)

  • Unit Cost of Expansion < 5 dollars/kgal
+

To assess the robustness of the infrastructure pathway policy, the two utilities apply a satisficing metric, which measures the percentage of sampled SOWs where the pathway policy meets the performance criteria:

+

\(R =\frac{1}{N}\sum_{j=1}^{N}\Lambda_{\theta,j}\)

+

Where \(\Lambda_{\theta,j}\) is defined as:

\[\Lambda_{\theta,j}=\begin{cases}
1, \quad \textrm{if}\ F(\theta)_{j}\leq \Phi_j \\
0, \quad \textrm{otherwise}
\end{cases}\]

\(\Phi_j\) is the vector of performance criteria evaluated in SOW \(j\), \(\theta\) is the portfolio and \(N\) is the total number of sampled SOWs.
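As a concrete illustration of this calculation, the sketch below applies the satisficing metric to a placeholder performance array; the array, thresholds, and criterion directions are assumptions for illustration only and do not reproduce the check_rdm_meet_criteria helper used later in this tutorial.

import numpy as np

# Placeholder performance array: one row per SOW, one column per criterion
# (reliability, restriction frequency, peak financial cost,
#  worst-case drought cost, unit cost of expansion)
rng = np.random.default_rng(1)
performance = rng.random((1000, 5))

# Thresholds corresponding to the five performance criteria listed above
criteria = np.array([0.99, 0.20, 0.80, 0.10, 5.0])

# Reliability must exceed its threshold; the remaining criteria must stay below theirs
meets = np.column_stack([
    performance[:, 0] > criteria[0],
    performance[:, 1:] < criteria[1:],
])

# Lambda_j is 1 only if SOW j satisfies every criterion; R is the fraction of such SOWs
lam = meets.all(axis=1)
R = lam.mean()
print(f"Robustness R = {R:.2%} of sampled SOWs")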

+

Below, we’ll visualize how robustness for the two utilities evolves over the 45-year planning horizon. We’ll assess robustness across three time periods: near-term (first 10 years), mid-term (22 years) and long-term (45 years).

+

We start by loading robustness values for both utilities. These values are calculated by applying the robustness metric above across 2,000 simulated SOWs. To make this exercise computationally tractable, we’ve precomputed these values, which can be found in the files “short_term_robustness.csv”, “mid_term_robustness.csv” and “long_term_robustness.csv”. These values were calculated using the function “check_rdm_meet_criteria” within the helper functions.

+
import numpy as np
+from matplotlib import pyplot as plt
+from functions.eBook_SD_helpers import check_rdm_meet_criteria, create_sd_input, plot_selected_tree_maps, get_factor_importances, open_exploration
+import seaborn as sns
+
+# load Deeply uncertain factors
+rdm_factors = np.loadtxt('data/DU_Factors.csv', delimiter= ',')
+
+sns.set()
+short_term_robustness = np.loadtxt('data/short_term_robustness.csv', delimiter= ',')
+mid_term_robustness = np.loadtxt('data/mid_term_robustness.csv', delimiter = ',')
+long_term_robustness = np.loadtxt('data/long_term_robustness.csv', delimiter = ',')
+
+# plot robustness over time
+fig =plt.figure(figsize=(9,3))
+plt.plot([10,22,45], [short_term_robustness[5]*100, mid_term_robustness[5]*100,long_term_robustness[5]*100], c='#B19CD9')
+plt.plot([10, 22, 45], [short_term_robustness[11]*100, mid_term_robustness[11]*100, long_term_robustness[11]*100], c= '#43b284')
+plt.scatter([10,22,45], [short_term_robustness[5]*100, mid_term_robustness[5]*100,long_term_robustness[5]*100], s=100, c='#B19CD9')
+plt.scatter([10, 22, 45], [short_term_robustness[11]*100, mid_term_robustness[11]*100, long_term_robustness[11]*100], s=100, c='#43b284')
+plt.xlabel('Time Horizon (yrs)')
+plt.ylabel('Robustness (% SOWs)')
+plt.legend(['Bedford', 'Greene'])
+plt.title('Robustness Over Time')
+plt.ylim([0, 107])
+
+
+
(0.0, 107.0)
+
+
+_images/discovery_4_1.png +
+
+

Exploring performance evolution#

+

The figure above reveals that the robustness of both water utilities degrades over time, with Bedford’s robustness declining further than Greene. This suggests that the proposed pathway policy is likely insufficient to meet the long-term needs of the two utilities. But how is the current policy insufficient? To answer that question we examine the performance measures that fail to meet performance criteria for each utility across the three planning horizons.

+
# Plot the type of vulnerability over time
+
+### Bedford ###
+plot_robustness_1 = np.zeros([3,5])
+# Determine the percentage of failure SOWs that violate each criterion (note some SOWs fail multiple criteria, so these fractions may sum to >1)
+criteria = ['Reliability', 'Restriction Frequency', 'Peak Financial Cost', 'Worst-case drought\nManagement Cost', 'Stranded Assets']
+plot_robustness_1[0,:] = (1 - short_term_robustness[0:5])/(1-short_term_robustness[5])
+plot_robustness_1[1,:] = (1 - mid_term_robustness[0:5])/(1-mid_term_robustness[5])
+plot_robustness_1[2,:] = (1 - long_term_robustness[0:5])/(1-long_term_robustness[5])
+
+# Plot over time
+fig, axes = plt.subplots(nrows=1, ncols=3, figsize=(9,4))
+axes[0].bar(np.arange(5), plot_robustness_1[0,:], color='#B19CD9')
+axes[0].set_xticks(np.arange(5))
+axes[0].set_xticklabels(criteria, rotation='vertical')
+axes[0].set_ylim([0,1])
+axes[0].set_title('10-year Horizon')
+axes[0].set_ylabel('Fraction of failure SOWs')
+axes[1].bar(np.arange(5), plot_robustness_1[1,:], color='#B19CD9')
+axes[1].set_xticks(np.arange(5))
+axes[1].set_xticklabels(criteria, rotation='vertical')
+axes[1].set_ylim([0,1])
+axes[1].set_title('22-year Horizon')
+axes[2].bar(np.arange(5), plot_robustness_1[2,:], color='#B19CD9')
+axes[2].set_xticks(np.arange(5))
+axes[2].set_xticklabels(criteria, rotation='vertical')
+axes[2].set_title('45-year Horizon')
+axes[2].set_ylim([0,1])
+fig.suptitle('Bedford')
+plt.tight_layout()
+
+### Greene ###
+# Determine the percentage of failure SOWs that violate each criterion (note some SOWs fail multiple criteria, so these fractions may sum to >1)
+plot_robustness_2 = np.zeros([3, 5])
+plot_robustness_2[0, :] = (1 - short_term_robustness[6:11]) / (1 - short_term_robustness[11])
+plot_robustness_2[1, :] = (1 - mid_term_robustness[6:11]) / (1 - mid_term_robustness[11])
+plot_robustness_2[2, :] = (1 - long_term_robustness[6:11]) / (1 - long_term_robustness[11])
+
+fig, axes = plt.subplots(nrows=1, ncols=3, figsize=(9, 4))
+axes[0].bar(np.arange(5), plot_robustness_2[0, :], color='#43b284')
+axes[0].set_xticks(np.arange(5))
+axes[0].set_xticklabels(criteria, rotation='vertical')
+axes[0].set_title('10-year Horizon')
+axes[0].set_ylim([0,1])
+axes[0].set_ylabel('Fraction of failure SOWs')
+axes[1].bar(np.arange(5), plot_robustness_2[1, :], color='#43b284')
+axes[1].set_xticks(np.arange(5))
+axes[1].set_xticklabels(criteria, rotation='vertical')
+axes[1].set_title('22-year Horizon')
+axes[1].set_ylim([0,1])
+axes[2].bar(np.arange(5), plot_robustness_2[2, :], color='#43b284')
+axes[2].set_xticks(np.arange(5))
+axes[2].set_xticklabels(criteria, rotation='vertical')
+axes[2].set_title('45-year Horizon')
+axes[2].set_ylim([0,1])
+fig.suptitle('Greene')
+plt.tight_layout()
+
+
+_images/discovery_6_0.png +_images/discovery_6_1.png +

In the figures above, we observe that the vulnerability of the two utilities changes in different ways. Early in the simulation period, Bedford is vulnerable to failures in reliability (though the robustness figure created in step B5.2 reveals that these failures are very rare). As the simulation period progresses, Bedford’s vulnerability expands to include failures in restriction frequency and worst-case cost. These failures indicate that the utility becomes broadly unable to manage drought conditions as future conditions progress.

+

Greene shows a very different evolution in vulnerability. Early in the simulation period, failures manifest in the restriction frequency objective, suggesting that the utility must rely on water use restrictions to maintain supply reliability. As the simulation progresses, however, the vulnerability evolves. When evaluated across the 45-year planning horizon, a new failure mode emerges - financial failure manifesting in peak financial cost and stranded assets. This suggests that the proposed pathway policy may be over-investing in new infrastructure, straining the utility’s budget with large debt payments that are unnecessary to maintain supply reliability.

+
+
+

How do deep uncertainties generate vulnerability?#

+

While the evolution of robustness provides insight into how the system evolves over time, it does not reveal why each utility is vulnerable. To examine how deep uncertainties generate vulnerability over time for the two utilities, we perform scenario discovery (factor mapping, Chapter 4.3). Here we’ll utilize gradient boosted trees to identify regions of the uncertainty space that cause the utilities to fail to meet performance criteria.
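The helper functions called below (plot_selected_tree_maps and, later, get_factor_importances) wrap this approach. As a rough, self-contained sketch of the underlying idea, assuming scikit-learn is available and using placeholder arrays in place of the tutorial's data:

import numpy as np
from sklearn.ensemble import GradientBoostingClassifier

# Placeholder inputs: sampled deep-uncertainty factors (one row per SOW) and a
# binary success/failure label for one performance criterion
rng = np.random.default_rng(7)
factors = rng.random((2000, 13))
success = (factors[:, 0] + 0.5 * factors[:, 1] < 1.0).astype(int)  # synthetic labels

# Fit boosted trees to separate the success region from the failure region
gbc = GradientBoostingClassifier(n_estimators=250, max_depth=4)
gbc.fit(factors, success)

# Build a factor map: predict success probability on a grid of the two most
# important factors, holding all other factors at their median values
top2 = np.argsort(gbc.feature_importances_)[-2:]
x = np.linspace(0, 1, 100)
gx, gy = np.meshgrid(x, x)
grid = np.tile(np.median(factors, axis=0), (gx.size, 1))
grid[:, top2[0]] = gx.ravel()
grid[:, top2[1]] = gy.ravel()
prob_success = gbc.predict_proba(grid)[:, 1].reshape(gx.shape)
# Contour-plotting prob_success against the two factors yields a factor map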

+
# import the performance data across 2000 SOWs for three time periods
+short_term_performance = np.loadtxt('data/short_term_performance.csv', delimiter= ',')
+mid_term_performance = np.loadtxt('data/mid_term_performance.csv', delimiter = ',')
+long_term_performance = np.loadtxt('data/long_term_performance.csv', delimiter = ',')
+
+satisficing_criteria = [.98, .2, .8, .1, 5]
+
+# transform into scenario discovery input
+short_term_SD_input = create_sd_input(short_term_performance, satisficing_criteria)
+mid_term_SD_input = create_sd_input(mid_term_performance, satisficing_criteria)
+long_term_SD_input = create_sd_input(long_term_performance, satisficing_criteria)
+
+# factor mapping Bedford
+fig, axes = plt.subplots(nrows=1, ncols=3, figsize=(9,3))
+plot_selected_tree_maps(5, 'short_term', 0, 6, satisficing_criteria, 0, axes[0])
+axes[0].set_title('10-year Horizon')
+plot_selected_tree_maps(5, 'mid_term', 0, 6, satisficing_criteria, 0, axes[1])
+axes[1].set_title('22-year Horizon')
+plot_selected_tree_maps(5, 'long_term', 0, 1, satisficing_criteria, 0, axes[2])
+axes[2].set_title('45-year Horizon')
+fig.suptitle('Bedford Factor Maps')
+plt.tight_layout()
+
+# factor mapping Greene
+fig, axes = plt.subplots(nrows=1, ncols=3, figsize=(9,3))
+plot_selected_tree_maps(11, 'short_term', 0, 8, satisficing_criteria, 0, axes[0])
+axes[0].set_title('10-year Horizon')
+plot_selected_tree_maps(11, 'mid_term', 0, 6, satisficing_criteria, 0, axes[1])
+axes[1].set_title('22-year Horizon')
+plot_selected_tree_maps(11, 'long_term', 0, 1, satisficing_criteria, 0, axes[2])
+axes[2].set_title('45-year Horizon')
+fig.suptitle('Greene Factor Maps')
+plt.tight_layout()
+
+
+
Factor maps for Bedford

Factor maps for Greene
+
+
+_images/discovery_9_1.png +_images/discovery_9_2.png +

In the figures above, we learn more about how the vulnerability of the two utilities evolves over time. Bedford begins with very few possible failures, which appear only in future scenarios with high demand growth. When evaluated across a 22-year planning horizon, Bedford is vulnerable when the near-term demand growth is high and water use restrictions are less effective than predicted. Under the full 45-year planning horizon, Bedford is vulnerable to sustained high levels of demand growth, failing if either near-term or mid-term demand growth exceeds expected levels.

+

Greene’s vulnerability evolves differently. It begins with vulnerability to high demand growth, but as the simulation progresses (and infrastructure is constructed), the utility becomes vulnerable to low-demand growth futures which cause the failures in financial criteria shown in section B.5.3. This indicates that the pathway policy over-builds in many SOWs, and becomes financially unstable if demand does not grow sufficiently to provide revenue to cover debt service payments.

+
+
+

Which uncertainties have the most influence on time-evolving performance?#

+

The factor maps generated in B.5.4 present the vulnerability generated by the two most important deep uncertainties as determined by gradient boosted trees. Yet the factor prioritization shows that more than two uncertainties are influential to regional performance. Further, we can observe that individual uncertainties have different impacts on each performance objective, and these impacts may change over time. In the cells below, explore the impact of deep uncertainty by generating factor maps for different combinations of deeply uncertain factors, objectives and time horizons.

+
sns.set_style('white')
+uncertainties = ['D1', 'D2', 'D3', 'BT', 'BM', 'DR', 'RE', 'EV', 'PM', 'CT', 'IA', 'IF', 'IP']
+uncertainties = ['Near-term demand', 'Mid-term demand', 'Long-term demand', 'Bond Term', 'Bond Rate', 'Discount Rate', 'Restriction Effectiveness', 'Evaporation Rate', 'Permitting time', 'Construction time', 'Inflow Amplitude', 'Inflow Frequency', 'Inflow Period']
+
+u1_st_FI = get_factor_importances(short_term_SD_input, rdm_factors, 250, 4, 5)
+u1_mt_FI = get_factor_importances(mid_term_SD_input, rdm_factors, 250, 4, 5)
+u1_lt_FI = get_factor_importances(long_term_SD_input, rdm_factors, 250, 4, 5)
+
+u1_all = np.vstack([u1_st_FI,u1_mt_FI, u1_lt_FI])
+u1_all = np.transpose(u1_all)
+
+# factor ranking -- utility 2
+u2_st_FI = get_factor_importances(short_term_SD_input, rdm_factors, 250, 4, 11)
+u2_mt_FI = get_factor_importances(mid_term_SD_input, rdm_factors, 250, 4, 11)
+u2_lt_FI = get_factor_importances(long_term_SD_input, rdm_factors, 250, 4, 11)
+u2_all = np.vstack([u2_st_FI,u2_mt_FI, u2_lt_FI])
+u2_all = np.transpose(u2_all)
+
+fig, (ax, ax2, cax) = plt.subplots(ncols=3,figsize=(5,5),
+                  gridspec_kw={"width_ratios":[1,1, 0.1]})
+fig.subplots_adjust(wspace=0.3)
+im = ax.imshow(u1_all, cmap='Reds', vmin=0, vmax=.3)
+ax.set_yticks(np.arange(13))
+ax.set_yticklabels(uncertainties)
+ax.set_xticks(np.arange(3))
+ax.set_xlabel('Time Horizon')
+ax.set_title('Bedford')
+
+im1 = ax2.imshow(u2_all, cmap='Reds', vmin=0, vmax=.3)
+ax2.set_yticks(np.arange(13))
+ax2.set_yticklabels([])
+ax2.set_xticks(np.arange(3))
+ax2.set_xlabel('Time Horizon')
+ax2.set_title('Greene')
+fig.colorbar(im, cax=cax, label='Factor Importance')
+plt.tight_layout()
+
+
+_images/discovery_12_0.png +

The Figure above shows the factor importance as determined by gradient boosted trees for both utilities across the three planning horizons. While near-term demand growth is important for both utilities under all three planning horizons, the importance of other factors evolves over time. For example, restriction effectiveness plays an important role for Greene under the 22-year planning horizon but disappears under the 45-year planning horizon. In contrast, the bond interest rate is important for predicting success over the 45-year planning horizon, but does not appear important over the 10- or 22-year planning horizons. These findings highlight how assumptions about the planning period can have a large impact on modeling outcomes.

+
+
+

Open exploration#

+

In the cell below, use the open_exploration function to explore how factor maps change for the two utilities based upon the uncertainties plotted, the objectives of interest and the time horizon.

+
# specify the utility ("Bedford" or "Greene")
+utility = "Bedford"
+
+# specify which performance objectives to investigate (note that not all performance objectives have failures, which may result in a blank factor map)
+# set this to one of the following: "Reliability", "Restriction Frequency", "Peak Financial Cost", "Worst Case Cost" or "Unit Cost"
+objective = "Reliability"
+
+# select uncertainties from the following list: 'D1', 'D2', 'D3', 'BT', 'BM', 'DR', 'RE', 'EV', 'PM', 'CT', 'IA', 'IF', 'IP'
+uncertainty_1 = 'D1'
+uncertainty_2 = 'D2'
+
+# The code below will plot factor maps over the three planning horizons for the information above
+fig, axes = plt.subplots(nrows=1, ncols=3, figsize=(9,3))
+open_exploration(utility, objective, 'short_term', uncertainty_1, uncertainty_2, axes[0])
+open_exploration(utility, objective, 'mid_term', uncertainty_1, uncertainty_2, axes[1])
+open_exploration(utility, objective, 'long_term', uncertainty_1, uncertainty_2, axes[2])
+plt.tight_layout()
+
+
+
Factor maps for Bedford, reliability
+
+
+_images/discovery_16_1.png +
+
+

Tips to apply this methodology to your own problem#

+

In this tutorial, we demonstrated time-evolving scenario discovery for a cooperative water supply system. To apply this workflow to your own problem:

+
  1. Choose sampling bounds for your parameters of interest, which will represent uncertainties that characterize your system.

  2. Generate samples for these parameters (this can be done using the saltelli.sample function as in B.2, as sketched below, or with another package).

  3. Define performance criteria for your problem.

  4. Evaluate parameter sets through your model, and save performance measures across multiple time horizons.

  5. Draw from the supporting code for this tutorial to perform scenario discovery and visualize results.
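A minimal sketch of step 2 using SALib's Saltelli sampler is shown below; the factor names and bounds are placeholders for whatever uncertainties you chose in step 1.

from SALib.sample import saltelli

# Placeholder problem definition for the uncertain factors chosen in step 1
problem = {
    'num_vars': 3,
    'names': ['demand_growth', 'restriction_effectiveness', 'bond_rate'],
    'bounds': [[0.0, 0.02], [0.8, 1.2], [0.03, 0.06]]
}

# Saltelli sampling generates N * (2D + 2) parameter sets for D factors
param_sets = saltelli.sample(problem, 1024)
print(param_sets.shape)  # (8192, 3) for D = 3 and N = 1024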
+
+

References#

+

Trindade, B. C., Reed, P. M., & Characklis, G. W. (2019). Deeply uncertain pathways: Integrated multi-city regional water supply infrastructure investment and portfolio management. Advances in Water Resources, 134, 103442.


A Hidden-Markov Modeling Approach to Creating Synthetic Streamflow Scenarios Tutorial#

+
+

Note

+
+
Run the tutorial interactively: HMM Notebook.
+
Please be aware that notebooks can take a couple minutes to launch.
+
To run the notebooks yourself, download the files here and use these requirements.
+
+
+
+

A Hidden-Markov Modeling Approach to Creating Synthetic Streamflow Scenarios#

+

In this notebook, we will be covering the basics of fitting a Hidden +Markov Model-based synthetic streamflow generator for a single site in +the Upper Colorado River Basin. First, we will characterize the observed +historical flow in the basin from 1909-2013. Then, we will fit a +synthetic streamflow generator to the observed flows in the basin in +order to create stationary synthetic flows. Finally, we will create a +non-stationary version of the generator to create flows that could be +representative of plausible future climate in the region. We ultimately +show how to place the synthetically generated flows in the context of +physically-informed CMIP5 projections to compare the two methods.

+
+

Background#

+

In the Western United States (US), and particularly the Colorado River +Basin, a recent study used tree-ring reconstructions to suggest that the +megadrought that has been occurring in the Southwest over the past 22 +years is the region’s worst drought since about 800 AD (Williams et al., +2022). The study’s lead author, UCLA climatologist Park Williams, +suggested that had the sequence of wet-dry years occurred without +anthropogenic forcing, the 2000s would have likely still been dry, but +not on the same level as the worst of the last millennium’s +megadroughts.

+

The recent trend of warming and reduced soil moisture in the Southwest +US is highly challenging from a water systems planning and management +perspective for the Colorado River Basin. Given the wide recognition +that the river is over-allocated, the most recent drought highlights the +difficulty of sustaining the flow requirements as dictated by the +Colorado Compact. Thus, there has been an increasing focus in +exploratory modeling efforts to clarify how vulnerable water systems in +this region are to plausible drought streamflow scenarios for the +future. In this tutorial, we’ll discuss how to create these scenarios +using a Hidden Markov Model (HMM)- based streamflow synthetic generator. +As discussed in Section +2.1 +and +4.2 +of the eBook, future climate conditions in the basin represent a deep +uncertainty that can lead to highly consequential water scarcity +outcomes. It is advantageous to create a model such as the HMM-based +generator in order to facilitate the creation of many ensembles of +streamflow that can ultimately be used to force regional water systems +models to understand how variability and drought extremes affect +regional water shortages, operations, and policies.

+
+
+
Lake Powell shows persistent effects from drought (Source: U.S. Bureau of Reclamation)
+
+
+
+

Let’s Get Started!#

+
+

Observed Record#

+

First, let’s take a look at the observed data from 1909-2013 for a +specific site. In this example, we use the outlet gauge of the Upper +Colorado River (USGS Gauge 09163500 at the Colorado-Utah state line). +Below, we create a plot of the annual streamflow.

+
# Import libraries
+import matplotlib as mpl
+import matplotlib.pyplot as plt
+import matplotlib.patches as patches
+import numpy as np
+import pandas as pd
+from random import random
+from SALib.sample import latin
+from scipy import stats as ss
+import statistics
+import statsmodels.api as sm
+
+
+# Import helper functions from local package
+from functions import fitmodel
+from functions import plotstates
+from functions import plotdist
+
+
+
# Read in annual historical data
+AnnualQ = pd.read_csv('data/uc_historical.csv')
+AnnualQ['Year'] = list(range(1909, 2014))
+
+# Plot a line graph
+fig, ax = plt.subplots(figsize=(12, 8))
+ax.plot(AnnualQ.iloc[:, 1],
+        AnnualQ.iloc[:, 0],
+        color='#005F73',
+        label='Annual')
+
+# Add labels and title
+ax.set_title("Upper Colorado Annual Flow")
+ax.set_xlabel("Year", fontsize=16)
+ax.set_ylabel("Annual Flow (cubic feet per year)", fontsize=16)
+plt.xticks(fontsize=12)
+plt.yticks(fontsize=12)
+mpl.rc('legend', fontsize=16)
+legend = plt.legend(loc="upper right")
+plt.show()
+plt.close()
+
+
+_images/hmm_9_0.png +

Let’s calculate an 11-year rolling mean of the same data to get a sense +of long-term trends.

+
fig, ax = plt.subplots(figsize=(12, 8))
+
+# Plot the original line graph
+plt.plot(AnnualQ.iloc[:,1],
+         AnnualQ.iloc[:,0],
+         color='#005F73',
+         label='Annual')
+
+# Plot an 11-year rolling mean
+plt.plot(AnnualQ.iloc[:, 1].rolling(11).mean(),
+         AnnualQ.iloc[:, 0].rolling(11).mean(),
+         color='#183A2E',
+         label='11-Year Rolling Mean')
+
+# Add labels and title
+plt.title("Upper Colorado Annual Flow")
+ax.set_xlabel("Year",fontsize=16)
+ax.set_ylabel("Annual Flow (cubic feet per year)", fontsize=16)
+plt.xticks(fontsize=12)
+plt.yticks(fontsize=12)
+mpl.rc('legend', fontsize=16)
+legend = plt.legend()
+plt.show()
+plt.close()
+
+
+_images/hmm_11_0.png +

The Colorado Compact, which prescribes flows between the Upper and Lower +Colorado Basins, was negotiated using data prior to 1922, a time period +revealed by the above figure to be one of the consistently wetter +periods on record. It’s clear today that since the 1980s, the Southwest +US has been experiencing aridification (Overpeck et al., 2020) and that +this observed record alone isn’t an accurate representation of what +future climate might look like in this region.

+

Let’s get a little more specific and formally quantify decadal droughts +that have occurred in the observed period. We use a metric proposed in +Ault et al. (2014). The authors define a decadal drought as when the +11-year rolling mean falls below a threshold that is 1/2 a standard +deviation below the overall mean of the record. We can then highlight +the block of years that fall in a decadal drought using yellow +rectangles below.

+
# Define drought threshold
+std = statistics.stdev(AnnualQ.iloc[:, 0])
+threshold = np.mean(AnnualQ.iloc[:, 0] - (0.5 * std))
+
+# Find where the rolling mean dips below the threshold
+drought_instances = [i for i, v in enumerate(AnnualQ.iloc[:,0].rolling(11).mean()) if v < threshold]
+drought_years = AnnualQ.iloc[:, 1].rolling(11).mean()[drought_instances]
+
+# Add labels and title
+fig, ax = plt.subplots(figsize=(12, 8))
+ax.plot(AnnualQ.iloc[:,1],
+        AnnualQ.iloc[:,0],
+        color='#005F73',
+        label='Annual')
+
+ax.plot(AnnualQ.iloc[:,1].rolling(11,center=True).mean(),
+        AnnualQ.iloc[:,0].rolling(11,center=True).mean(),
+        color='#183A2E',
+        label='11-Year Rolling Mean')
+
+ax.axhline(y=threshold,
+           color='black',
+           linestyle='--',
+           label='Drought Threshold')
+
+# Visualize the drought periods as yellow rectangles
+for i in drought_years:
+
+    # Plot a box centered around those values and with 5 years on either side.
+    rect = patches.Rectangle((i-5,0), 11,2e7, linewidth=1, edgecolor='#EFE2BE', facecolor='#EFE2BE')
+
+    # Add the patch to the Axes
+    ax.add_patch(rect)
+
+
+plt.title("Upper Colorado Annual Flow")
+ax.set_xlabel("Year", fontsize=16)
+ax.set_ylabel("Annual Flow (cubic feet per year)", fontsize=16)
+plt.xticks(fontsize=12)
+plt.yticks(fontsize=12)
+mpl.rc('legend', fontsize=16)
+legend = plt.legend()
+plt.show()
+plt.close()
+
+
+_images/hmm_14_0.png +

By this metric, the Upper Colorado Basin region has experienced two +decadal droughts over the last century.

+
+
+

Synthetic Stationary Generator to Better Quantify Natural Variability#

+

It is important to remember that the streamflow that we have observed in +the region over the last century is only one instance of the hydrology +that could occur since the atmosphere is an inherently stochastic +system. Thus, we require a tool that will allow us to see multiple +plausible realizations of the streamflow record to understand the +internal variability that characterizes the historical period. One +observed realization of historical streamflow is limited in its ability +to capture rare extremes; plausible (but not observed) alternative +instances of streamflow records can help to fill this gap. The tool that +we use to develop synthetic flows for the region is a Gaussian Hidden +Markov Model (HMM). If a system follows a Markov process, it switches +between a number of “hidden states” dictated by a transition matrix. +Each state has its own Gaussian probability distribution (defined by a +mean and standard deviation) and one can draw from this distribution to +create synthetic flows that fit the properties of the historical +distribution. HMMs are an attractive choice for this region because they +can simulate persistence (i.e., long duration droughts), which is a +characteristic of the region’s hydro-climatology. The figure below shows +an example of a 2-state Gaussian HMM that we will be fitting for this +example.

+
+
+
Two-state Gaussian HMM with mean and standard deviation parameters
+

Below is the code that fits the HMM model to the last 2/3 of the +historical record of log annual flows at the CO-UT stateline gauge and +creates an alternative trace of 105 years. A subset of the dataset is +chosen in order to minimize overfitting and to retain a set of data for +validation of the model. When we fit our model, we utilize the +Baum-Welch algorithm (a special version of the expectation-maximization +algorithm) to find the optimal parameters that maximize the likelihood +of seeing the observed flows. Ultimately, the algorithm will return a +mean and standard deviation associated with each state (mus and sigmas +defined below) and a 2x2 transition probability matrix that captures the +likelihood of transitioning between states (P). We can also retrieve the +annual hidden states across the observed series, also known as the +Viterbi sequence of states, which classifies each year in a “wet” or +“dry” state.

+
# Number of years for alternative trace
+n_years = 105
+
+# Import historical data that is used to fit the HMM model
+AnnualQ_h = pd.read_csv('data/uc_historical.csv')
+
+# Fit the model and pull out relevant parameters and samples
+logQ = np.log(AnnualQ_h)
+hidden_states, mus, sigmas, P, logProb, samples, model = fitmodel.fitHMM(logQ, n_years)
+
+
+
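The fitmodel.fitHMM helper encapsulates the fit described above. For orientation, a minimal sketch of an equivalent fit with the hmmlearn package is shown below; this is an assumption about the approach, not the helper's actual implementation, and the example data are placeholders.

import numpy as np
from hmmlearn.hmm import GaussianHMM

# Placeholder log annual flows (one column) standing in for logQ above
rng = np.random.default_rng(0)
logQ_example = rng.normal(15.0, 0.3, size=(70, 1))

# Fit a 2-state Gaussian HMM via Baum-Welch (expectation-maximization)
hmm = GaussianHMM(n_components=2, n_iter=1000, random_state=0).fit(logQ_example)

mus_example = hmm.means_.flatten()               # state means in log space
sigmas_example = np.sqrt(hmm.covars_).flatten()  # state standard deviations (single feature)
P_example = hmm.transmat_                        # 2x2 transition probability matrix
viterbi_example = hmm.predict(logQ_example)      # most likely state sequence
trace_example, _ = hmm.sample(105)               # one 105-year synthetic trace (log space)
# Note: hmmlearn does not guarantee which state index is the drier one;
# the tutorial's helper reorders states so that state 0 is always the dry state.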

We’ve fit our HMM, but what does the model look like? Let’s plot the annual time series of hidden states, or the Viterbi sequence. In the code above, we have defined that the drier state is always represented by state 0. Thus, we know that hidden_states = 0 corresponds to the dry state and hidden_states = 1 to the wet state.

+
# Plot Viterbi sequence
+plotstates.plotTimeSeries(np.log(AnnualQ.iloc[:,0]), hidden_states, 'Annual Flow (cubic feet per year)')
+
+
+_images/hmm_21_0.png +

In the figure above, we see that the years with the higher log flows +tend to be classified in a “wet” state and the opposite is true of the +“dry” state. We can also print the transition matrix, which shows the +likelihood of transitioning between states. Note that the system has a +high likelihood of persisting in the same state.

+
print(model.transmat_)
+
+
+
[[0.65095026 0.34904974]
+ [0.3205531  0.6794469 ]]
+
+
+
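One quick way to quantify this persistence is the expected sojourn time in each state, 1/(1 - p_ii). For the fitted matrix printed above this works out to roughly 2.9 consecutive years in the dry state and 3.1 in the wet state:

import numpy as np

# Transition matrix printed above; expected consecutive years in state i is 1 / (1 - p_ii)
P_fit = np.array([[0.65095026, 0.34904974],
                  [0.3205531,  0.6794469 ]])
expected_years = 1.0 / (1.0 - np.diag(P_fit))
print(expected_years)  # approximately [2.87, 3.12]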

Let’s also plot the distribution of log annual flows associated with the +wet and dry states.

+
# Plot wet and dry state distributions
+plotdist.plotDistribution(logQ, mus, sigmas, P)
+
+
+_images/hmm_25_0.png +

The wet state distribution is characterized by a greater mean flow, but note that there is significant overlap in the tails of the distributions below, which demonstrates why years with similar flows can be classified in different states.

+

Now let’s see what the drought dynamics look like in the synthetic +scenario that we created using the same definition that we had used for +the historical period.

+
# Retrieve samples and back-transform out of log space
+AnnualQ_s = np.exp(samples[0])
+AnnualQ_s = pd.DataFrame(AnnualQ_s)
+AnnualQ_s['Year'] = list(range(1909, 2014))
+
+# Define drought threshold
+std=statistics.stdev(AnnualQ_s.iloc[:, 0])
+threshold=np.mean(AnnualQ_s.iloc[:, 0] - (0.5 * std))
+
+# Where does the rolling mean dip below the threshold
+drought_instances = [i for i,v in enumerate(AnnualQ_s.iloc[:, 0].rolling(11).mean()) if v < threshold]
+drought_years = AnnualQ_s.iloc[:, 1].rolling(11).mean()[drought_instances]
+
+#Visualize the streamflow scenario
+fig, ax = plt.subplots(figsize=(12, 8))
+
+#Plot the original line graph
+ax.plot(AnnualQ_s.iloc[:,1],
+        AnnualQ_s.iloc[:,0],
+        color='#005F73',
+        label='Annual')
+
+#Plot a 11-year rolling mean
+ax.plot(AnnualQ_s.iloc[:,1],
+        AnnualQ_s.iloc[:,0].rolling(11, center=True).mean(),
+        color='#183A2E',
+        label='11-Year Rolling Mean')
+
+# Add labels and title
+ax.axhline(y=threshold,
+           color='black',
+           linestyle='--',
+           label='Drought Threshold')
+
+
+for i in drought_years:
+
+    #Plot a box centered around those values and with 5 years on either side.
+    rect = patches.Rectangle((i - 5,
+                              0),
+                              11,
+                              2e7,
+                              linewidth=1,
+                              edgecolor='#EFE2BE',
+                              facecolor='#EFE2BE')
+
+    # Add the patch to the Axes
+    ax.add_patch(rect)
+
+
+plt.title("Upper Colorado Annual Flow (Synthetic Stationary)",fontsize=16)
+plt.xlabel("Year", fontsize=16)
+plt.ylabel("Annual Flow (cubic feet per year)", fontsize=16)
+mpl.rc('legend', fontsize=16)
+plt.legend()
+plt.xticks(fontsize=12)
+plt.yticks(fontsize=12)
+plt.show()
+plt.close()
+
+
+_images/hmm_28_0.png +

You can sample from the model and create more 105-year traces and note +how the location and number of decadal droughts changes. This +demonstrates how different the historical record can look just within +the range of natural variability. It’s also important to remember that +when droughts occur can also define the ultimate effect of the drought +(i.e. is it a time when there is a large population growth or a time +when humans can adapt by conserving or building more infrastructure?). A +hydrologic drought need not manifest into an agricultural or operational +drought of the same magnitude if stored surface water is available.
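Assuming the model object returned by the helper is an hmmlearn GaussianHMM (consistent with the model.transmat_ call above), additional traces can be drawn directly from it; a minimal sketch:

import numpy as np

# Draw three more 105-year traces from the fitted HMM and return them to real space
# (assumes `model` is the fitted hmmlearn object from the cell above)
for seed in range(3):
    log_trace, trace_states = model.sample(105, random_state=seed)
    annual_trace = np.exp(log_trace.flatten())
    # annual_trace can be plotted and screened for decadal droughts exactly as above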

+

We externally run the HMM many times to create a dataset of 100 +instances of the 105-year traces and 1000 instances of the 105-year +traces that are available in the package +(“synthetic_stationary_small_sample_100.csv”,“synthetic_stationary_large_sample_1000”). +The shaded green lines correspond to the flow duration curves (FDCs) for +the generated streamflow traces in comparison with the FDC of the +historical record in beige.

+
+
+
Generated streamflow traces in comparison with the FDC of the historical record.
+

As expected, the stationary synthetic FDCs envelope the historical FDC +and particularly, the synthetic traces offer many more instances of low +flow conditions that could lead to more extreme drought conditions than +what has been observed historically. It is also useful to check for +convergence of samples and to determine how many samples are needed to +fully represent internal variability. Above we see that the extension to +1000 instances of 105-year traces fills out regions of the FDC, +including creating some more extreme drought conditions, but that +additional samples will likely not fill out the FDC substantially more.
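For reference, a flow duration curve like the ones shown above can be computed from any trace by sorting flows from largest to smallest and assigning each a Weibull exceedance probability; the sketch below uses a placeholder trace rather than the packaged sample files.

import numpy as np

def flow_duration_curve(flows):
    """Return exceedance probabilities and flows sorted from largest to smallest."""
    sorted_flows = np.sort(flows)[::-1]
    ranks = np.arange(1, len(sorted_flows) + 1)
    exceedance = ranks / (len(sorted_flows) + 1)  # Weibull plotting position
    return exceedance, sorted_flows

# Example with a placeholder 105-year trace
rng = np.random.default_rng(0)
trace = np.exp(rng.normal(15.0, 0.3, 105))
p, q = flow_duration_curve(trace)
# plt.plot(p, q) would draw one of the shaded curves in the figure above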

+
+
+

Non-Stationary Synthetic Generator to Impose Climate Changes#

+

Now, we create flows under non-stationary conditions to get a better +understanding of what flows can look like under climate changes. In +order to create flows under non-stationary conditions, we can toggle the +parameters of the HMM model in order to create systematic changes to the +model that can represent a changing climate. The HMM has 6 parameters +that define it. When we fit the historical model, the parameters that +are fit represent a baseline parameter value. In this non-stationary +generator, we define a range to sample these parameters from.

+
Parameter                                            Current Value    Lower Bound    Upper Bound
Log-Space Wet State Mean Multiplier                  1.00             0.98           1.02
Log-Space Dry State Mean Multiplier                  1.00             0.98           1.02
Log-Space Wet State Standard Deviation Multiplier    1.00             0.75           1.25
Log-Space Dry State Standard Deviation Multiplier    1.00             0.75           1.25
Change in Dry-Dry Transition Probability             0.00             -0.30          +0.30
Change in Wet-Wet Transition Probability             0.00             -0.30          +0.30

Now let’s sample 1000 times from these bounds to create 1000 new +parameterizations of the model. Here we use SALib and the Latin +Hypercube sample function.

+
# Create problem structure with parameters that we want to sample
+problem = {
+    'num_vars': 6,
+    'names': ['wet_mu', 'dry_mu', 'wet_std','dry_std','dry_tp',"wet_tp"],
+    'bounds': [[0.98, 1.02],
+               [0.98, 1.02],
+               [0.75,1.25],
+               [0.75,1.25],
+               [-0.3,0.3],
+               [-0.3,0.3]]
+}
+
+# generate 1000 parameterizations
+n_samples = 1000
+
+# set random seed for reproducibility
+seed_value = 123
+
+# Generate our samples
+LHsamples = latin.sample(problem, n_samples, seed_value)
+
+
+

Now let’s look at what some of the traces look like in our +non-stationary generator. Let’s choose a random instance from the +1000-member space and adjust the parameters accordingly.

+
# Define static parameters
+n_years = 105
+
+# Sample parameter; Adjust to any sample number from 0-999
+sample = 215
+
+# Create empty arrays to store the new Gaussian HMM parameters for each SOW
+Pnew = np.empty([2,2])
+piNew = np.empty([2])
+musNew_HMM = np.empty([2])
+sigmasNew_HMM = np.empty([2])
+logAnnualQ_s = np.empty([n_years])
+
+# Calculate new transition matrix and stationary distribution of SOW at last node as well as new means and standard deviations
+Pnew[0, 0] = max(0.0, min(1.0, P[0, 0] + LHsamples[sample][4]))
+Pnew[1, 1] = max(0.0, min(1.0, P[1, 1] + LHsamples[sample][5]))
+Pnew[0, 1] = 1 - Pnew[0, 0]
+Pnew[1, 0] = 1 - Pnew[1, 1]
+eigenvals, eigenvecs = np.linalg.eig(np.transpose(Pnew))
+one_eigval = np.argmin(np.abs(eigenvals - 1))
+piNew = np.divide(np.dot(np.transpose(Pnew), eigenvecs[:, one_eigval]),
+                  np.sum(np.dot(np.transpose(Pnew), eigenvecs[:,one_eigval])))
+
+musNew_HMM[0] = mus[0] * LHsamples[sample][1]
+musNew_HMM[1] = mus[1] * LHsamples[sample][0]
+sigmasNew_HMM[0] = sigmas[0] * LHsamples[sample][3]
+sigmasNew_HMM[1] = sigmas[1] * LHsamples[sample][2]
+
+# Generate first state and log-space annual flow at last node
+states = np.empty([n_years])
+if random() <= piNew[0]:
+    states[0] = 0
+    logAnnualQ_s[0] = ss.norm.rvs(musNew_HMM[0], sigmasNew_HMM[0])
+else:
+    states[0] = 1
+    logAnnualQ_s[0] = ss.norm.rvs(musNew_HMM[1], sigmasNew_HMM[1])
+
+# Generate remaining state trajectory and log space flows at last node
+for j in range(1, n_years):
+    if random() <= Pnew[int(states[j-1]), int(states[j-1])]:
+        states[j] = states[j-1]
+    else:
+        states[j] = 1 - states[j-1]
+
+    if states[j] == 0:
+        logAnnualQ_s[j] = ss.norm.rvs(musNew_HMM[0], sigmasNew_HMM[0])
+    else:
+        logAnnualQ_s[j] = ss.norm.rvs(musNew_HMM[1], sigmasNew_HMM[1])
+
+# Convert log-space flows to real-space flows
+AnnualQ_s = np.exp(logAnnualQ_s)-1
+
+
+

Now let’s see what this synthetic trace looks like.

+
# Retrieve samples and back-transform out of log space
+AnnualQ_s = pd.DataFrame(AnnualQ_s)
+AnnualQ_s['Year'] = list(range(1909, 2014))
+
+# Define drought threshold
+std = statistics.stdev(AnnualQ_s.iloc[:, 0])
+threshold = np.mean(AnnualQ_s.iloc[:, 0] - (0.5 * std))
+
+# Where does the rolling mean dip below the threshold
+drought_instances = [i for i, v in enumerate(AnnualQ_s.iloc[:, 0].rolling(11).mean()) if v < threshold]
+drought_years = AnnualQ_s.iloc[:, 1].rolling(11).mean()[drought_instances]
+
+# Visualize the streamflow scenario
+fig, ax = plt.subplots(figsize=(12, 8))
+
+# Plot the original line graph
+ax.plot(AnnualQ_s.iloc[:,1],
+        AnnualQ_s.iloc[:,0],
+        color='#005F73',
+        label='Annual')
+
+# Plot a 11-year rolling mean
+ax.plot(AnnualQ_s.iloc[:, 1],
+        AnnualQ_s.iloc[:, 0].rolling(11, center=True).mean(),
+        color='#183A2E',
+        label='11-Year Rolling Mean')
+
+# Add labels and title
+ax.axhline(y=threshold,
+           color='black',
+           linestyle='--',
+           label='Drought Threshold')
+
+
+for i in drought_years:
+
+    # Plot a box centered around those values and with 5 years on either side.
+    rect = patches.Rectangle((i - 5,0),
+                             11,
+                             2e7,
+                             linewidth=1,
+                             edgecolor='#EFE2BE',
+                             facecolor='#EFE2BE')
+
+    # Add the patch to the Axes
+    ax.add_patch(rect)
+
+
+plt.title("Annual Flow (Synthetic Non-Stationary)", fontsize=16)
+plt.xlabel("Year", fontsize=16)
+plt.ylabel("Annual Flow (cubic feet per year)", fontsize=16)
+plt.xticks(fontsize=12)
+plt.yticks(fontsize=12)
+mpl.rc('legend', fontsize=16)
+legend = plt.legend(loc="upper right")
+plt.show()
+plt.close()
+
+
+_images/hmm_40_0.png +

Above is the example trace from the new non-stationary model. You may +see fewer or more decadal drought instances. We can further summarize +overall decadal drought characteristics across the samples. Let’s plot a +histogram of the total number of times we go below the drought threshold +across these realizations.

+
decadal_drought_occurence=np.empty([1000])
+
+for y in range(1000):
+
+    # Create empty arrays to store the new Gaussian HMM parameters for each SOW
+    Pnew = np.empty([2, 2])
+    piNew = np.empty([2])
+    musNew_HMM = np.empty([2])
+    sigmasNew_HMM = np.empty([2])
+    logAnnualQ_s = np.empty([n_years])
+
+    # Calculate new transition matrix and stationary distribution of SOW at last node
+    # as well as new means and standard deviations
+
+    Pnew[0, 0] = max(0.0,min(1.0, P[0, 0] + LHsamples[y][4]))
+    Pnew[1, 1] = max(0.0,min(1.0, P[1, 1] + LHsamples[y][5]))
+    Pnew[0, 1] = 1 - Pnew[0, 0]
+    Pnew[1, 0] = 1 - Pnew[1, 1]
+    eigenvals, eigenvecs = np.linalg.eig(np.transpose(Pnew))
+    one_eigval = np.argmin(np.abs(eigenvals - 1))
+    piNew = np.divide(np.dot(np.transpose(Pnew), eigenvecs[:, one_eigval]),
+                      np.sum(np.dot(np.transpose(Pnew), eigenvecs[:, one_eigval])))
+
+    musNew_HMM[0] = mus[0] * LHsamples[y][1]
+    musNew_HMM[1] = mus[1] * LHsamples[y][0]
+    sigmasNew_HMM[0] = sigmas[0] * LHsamples[y][3]
+    sigmasNew_HMM[1] = sigmas[1] * LHsamples[y][2]
+
+    # Generate first state and log-space annual flow at last node
+    states = np.empty([n_years])
+    if random() <= piNew[0]:
+        states[0] = 0
+        logAnnualQ_s[0] = ss.norm.rvs(musNew_HMM[0], sigmasNew_HMM[0])
+    else:
+        states[0] = 1
+        logAnnualQ_s[0] = ss.norm.rvs(musNew_HMM[1], sigmasNew_HMM[1])
+
+    # generate remaining state trajectory and log space flows at last node
+    for j in range(1, n_years):
+        if random() <= Pnew[int(states[j-1]), int(states[j-1])]:
+            states[j] = states[j-1]
+        else:
+            states[j] = 1 - states[j-1]
+
+        if states[j] == 0:
+            logAnnualQ_s[j] = ss.norm.rvs(musNew_HMM[0], sigmasNew_HMM[0])
+        else:
+            logAnnualQ_s[j] = ss.norm.rvs(musNew_HMM[1], sigmasNew_HMM[1])
+
+    # Convert log-space flows to real-space flows
+    AnnualQ_s = np.exp(logAnnualQ_s) - 1
+    AnnualQ_s = pd.DataFrame(AnnualQ_s)
+    AnnualQ_s['Year'] = list(range(1909, 2014))
+
+    # Define drought threshold
+    std = statistics.stdev(AnnualQ_s.iloc[:, 0])
+    threshold = np.mean(AnnualQ_s.iloc[:, 0] - (0.5 * std))
+
+    # Where does the rolling mean dip below the threshold
+    drought_instances = [i for i, v in enumerate(AnnualQ_s.iloc[:, 0].rolling(11).mean()) if v < threshold]
+    decadal_drought_occurence[y] = len(drought_instances)
+
+
+
fig, ax = plt.subplots(figsize=(12, 8))
+ax.hist(decadal_drought_occurence,label='Non-Stationary generator',color="#005F73")
+ax.set_xlabel('Number of Instances of Decadal Drought',fontsize=16)
+ax.set_ylabel('Frequency',fontsize=16)
+ax.axvline(x=2, color='r', linestyle='-',label='Observed')
+mpl.rc('legend', fontsize = 16)
+plt.xticks(fontsize = 12)
+plt.yticks(fontsize = 12)
+plt.show()
+plt.close()
+
+
+_images/hmm_43_0.png +

Note how many more instances of decadal drought we are creating with the non-stationary generator than in our observed 105-year trace, which creates a rich space in which we can test our models. Just as we did with the stationary generator, we can externally run the non-stationary generator to create 10,000 instances of the 105-year traces that are available in the package (“synthetic_nonstationary_large_sample_10000.csv”). The shaded green and blue lines correspond to the FDCs for the stationary and non-stationary generated streamflow traces in comparison with the FDC of the historical record in beige. Note how the non-stationary generator produces even more drought extremes than the stationary synthetic traces.

+
+
+
Generated streamflow traces in comparison with the FDC of the historical record.
+
+
+

Placing CMIP5 Projections in the Context of Non-Stationary Flows#

+

We have broadened the drought conditions that we are creating, which can be very useful for understanding how our water systems model performs under potentially extreme scenarios. However, it’s useful to compare our bottom-up synthetically generated flows against global physically-driven CMIP5 projections to get a better understanding of how the two approaches compare. We first acquire 97 CMIP5 projections from the Colorado River Water Availability Study (CWCB, 2012). In each of these projections, monthly precipitation factor changes and temperature delta changes were computed between mean projected 2035–2065 climate statistics and mean historical climate statistics from 1950–2013. These 97 different combinations of 12 monthly precipitation multipliers and 12 monthly temperature delta shifts were applied to historical precipitation and temperature time series from 1950–2013. The resulting climate time series were run through a Variable Infiltration Capacity (VIC) model of the UCRB, resulting in 97 time series of projected future streamflows at the Colorado‐Utah state line.

+

We fit an HMM to each trace of projected streamflow and get a set of +corresponding HMM parameters. Then we take the ratio between these +parameters and the baseline HMM parameters that we calculated earlier in +the notebook in order to calculate the multipliers associated with each +CMIP5 projection. This is all done externally, so we import the +resulting multipliers in the next line.
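Although that step is performed externally, the sketch below indicates roughly how the multipliers for a single projected trace could be derived; the projected_flows array is a placeholder, the call relies on the fitmodel helper and the baseline mus, sigmas, P and n_years defined earlier in this notebook, and the exact external workflow may differ.

import numpy as np

# Placeholder projected streamflow trace standing in for one VIC-simulated CMIP5 projection
projected_flows = np.exp(np.random.default_rng(2).normal(15.0, 0.3, size=(64, 1)))

# Fit an HMM to the projected trace (assumes the same helper signature used earlier)
_, mus_p, sigmas_p, P_p, _, _, _ = fitmodel.fitHMM(np.log(projected_flows), n_years)

# Express the projection as multipliers (means, standard deviations) and additive
# shifts (transition probabilities) relative to the baseline parameters mus, sigmas, P
multipliers = {
    'dry_mu':  mus_p[0] / mus[0],
    'wet_mu':  mus_p[1] / mus[1],
    'dry_std': sigmas_p[0] / sigmas[0],
    'wet_std': sigmas_p[1] / sigmas[1],
    'dry_tp':  P_p[0, 0] - P[0, 0],
    'wet_tp':  P_p[1, 1] - P[1, 1],
}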

+
# Read in CMIP5 and paleo multipliers
+CMIP5_multipliers = pd.read_csv('data/CMIP5_SOWs.txt', header=None, sep=" ")
+
+
+

Let’s plot a response surface that will allow us to see how combinations of HMM parameters tend to influence decadal drought. In order to get a continuous surface, we’ll fit a non-linear regression to the parameter values and then predict the decadal drought occurrence over a set of grid points. We fit the response surface for two parameters that should have an effect on decadal drought: the dry distribution mean and the dry-dry transition probabilities.

+
# Choose two parameters to fit the response surface for
+mu_dry=[i[1] for i in LHsamples]
+tp_dry=[i[4] for i in LHsamples]
+
+# Create an interpolation grid
+xgrid = np.arange(np.min(mu_dry),
+                  np.max(mu_dry),
+                  (np.max(mu_dry) - np.min(mu_dry)) / 100)
+
+ygrid = np.arange(np.min(tp_dry),
+                  np.max(tp_dry),
+                  (np.max(tp_dry) - np.min(tp_dry)) / 100)
+
+# Fit regression
+d = {'Dry_Tp': tp_dry,
+     'Dry_Mu': mu_dry,
+     'Drought_Occurrence':decadal_drought_occurence}
+
+df = pd.DataFrame(d)
+df['Intercept'] = np.ones(np.shape(df)[0])
+df['Interaction'] = df['Dry_Tp'] * df['Dry_Mu']
+cols = ['Intercept'] + ['Dry_Mu'] + ['Dry_Tp'] + ['Interaction']
+ols = sm.OLS(df['Drought_Occurrence'], df[cols])
+result = ols.fit()
+
+# Calculate drought occurence for each grid point
+X, Y = np.meshgrid(xgrid, ygrid)
+x = X.flatten()
+y = Y.flatten()
+grid = np.column_stack([np.ones(len(x)), x, y, x * y])
+z = result.predict(grid)
+z[z < 0.0] = 0.0 # replace negative drought occurrence predictions with 0
+
+
+

Let’s plot our results:

+
# Set color gradient for response surface
+drought_map = mpl.cm.get_cmap('RdBu_r')
+
+# Reshape our predicted drought occurrence and define bounds of colors
+Z = np.reshape(z, np.shape(X))
+vmin = np.min([np.min(z), np.min(df['Drought_Occurrence'].values)])
+vmax = 15
+norm = mpl.colors.Normalize(vmin, vmax)
+
+# Plot response surface and CMIP5 projections
+fig, ax = plt.subplots(figsize=(12, 8))
+ax.contourf(X, Y, Z, cmap=drought_map, norm=norm)
+ax.scatter(CMIP5_multipliers.iloc[:,7],
+           CMIP5_multipliers.iloc[:,12],
+           c='#ffffb3',
+           edgecolor='none',
+           s=30)
+cbar = ax.figure.colorbar(mpl.cm.ScalarMappable(norm=norm, cmap=drought_map), ax=ax)
+ax.set_xlim(np.nanmin(X), np.nanmax(X))
+ax.set_ylim(np.nanmin(Y), np.nanmax(Y))
+ax.set_xlabel('Dry State Mu', fontsize=14)
+ax.set_ylabel('Dry-Dry Transition Probability', fontsize=14)
+ax.tick_params(axis='both', labelsize=14)
+cbar.ax.set_ylabel('Decadal Drought Occurrence', rotation=-90, fontsize=14, labelpad=15)
+cbar.ax.tick_params(axis='y',labelsize=14)
+plt.show()
+plt.close()
+
+
+_images/hmm_53_0.png +

We see the influence of the dry state mean and dry-dry transition +parameters. We’re likely to see more decadal droughts when we (1) +increase the dry-dry transition probability, which inherently will +increase persistence of the dry state, and (2) when we make the dry +state log mean drier. Note that the CMIP5 scenarios tend to span the +extent of the dry mean sample space, but are less representative of the +dry transition probability sample space, which suggests that the types +of hydrological droughts represented in the projections tend to only be +wetter to slightly drier than our baseline. Both methods of producing +these scenarios are valid, though studies have suggested that +globally-resolved GCMs may be inappropriate to represent regional +extremes. Ultimately, if your goal is to produce a variety of ensembles +that are characterized by many different drought characteristics, you +will likely find that a generator approach will serve this purpose +better.

+
+
+

Tips to Create an HMM-Based Generator for your System#

+

In this tutorial, we demonstrated how to fit an HMM-based generator for +a single gauge located in the Upper Colorado River Basin. In order to +apply this methodology to your problem, you will need to first ask:

+
  1. Is this model appropriate for my location of interest? We have applied this style of generator to locations where persistent wet and dry states are characteristic, which tends to be in the Western US. Ultimately the best way to judge if an HMM is useful for your application is to fit the model and explore the resulting distributions (see the sketch after this list). Are there two (or more) distinct states that emerge? If not, then your location may not exhibit the type of persistence that an HMM-based generator is useful for. You can consider exploring other styles of generators such as the Kirsch-Nowak generator (Kirsch et al., 2013).

  2. Do I have the right datasets? We use annual data for our location of interest. In this notebook, the HMM is fit to log annual flows. Ultimately, it can be disaggregated to daily flows (using a reference historical daily dataset) to be useful in water resources operational applications. You could also disaggregate to a finer resolution than daily if the historical dataset exists.
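As a rough way to explore point 1, you can compare Gaussian HMM fits with different numbers of states using an information criterion; the sketch below assumes the hmmlearn package (not part of this tutorial's helper functions) and uses placeholder data in place of your site's log annual flows.

import numpy as np
from hmmlearn.hmm import GaussianHMM

# Placeholder log annual flows for the site of interest
rng = np.random.default_rng(3)
logQ_example = np.concatenate([rng.normal(14.8, 0.2, 50),
                               rng.normal(15.3, 0.2, 55)]).reshape(-1, 1)

for k in (1, 2, 3):
    hmm = GaussianHMM(n_components=k, n_iter=1000, random_state=0).fit(logQ_example)
    logL = hmm.score(logQ_example)
    # Free parameters: k means + k variances + k*(k-1) transition + (k-1) initial probabilities
    n_params = 2 * k + k * (k - 1) + (k - 1)
    aic = 2 * n_params - 2 * logL
    print(f"{k}-state HMM: log-likelihood = {logL:.1f}, AIC = {aic:.1f}")
# A clearly lower AIC for the 2-state fit is one indication of distinct wet and dry states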

If you meet these requirements, feel free to proceed through fitting the model using the code available in the notebook. Be sure to consider the appropriate number of samples to generate (both in the stationary and non-stationary cases). Make sure that you test multiple sample sizes and continue to increase your sample size until you converge to a consistent representation of extremes. What is the appropriate number of LHS samples of the parameters to use? In this experiment we used 1,000 samples of parameters due to extensive stability tests described in Quinn et al. (2020).

+

Finally, to learn more about this test case refer to Hadjimichael et al. (2020a) and Hadjimichael et al. (2020b). For another study on synthetic drought generation to support vulnerability assessments in the Research Triangle region of North Carolina, please refer to Herman et al. (2016).

+
+
+

References#

+

Ault, T. R., Cole, J. E., Overpeck, J. T., Pederson, G. T., & Meko, D. +M. (2014). Assessing the risk of persistent drought using climate model +simulations and paleoclimate data. Journal of Climate, 27(20), +7529-7549.

+

CWCB (2012). Colorado River Water Availability Study Phase I Report. Colorado Water Conservation Board.

+

Hadjimichael, A., Quinn, J., Wilson, E., Reed, P., Basdekas, L., Yates, +D., & Garrison, M. (2020a). Defining robustness, vulnerabilities, and +consequential scenarios for diverse stakeholder interests in +institutionally complex river basins. Earth’s Future, 8(7), +e2020EF001503.

+

Hadjimichael, A., Quinn, J., & Reed, P. (2020b). Advancing diagnostic model evaluation to better understand water shortage mechanisms in institutionally complex river basins. Water Resources Research, 56(10), e2020WR028079.

+

Herman, J. D., Zeff, H. B., Lamontagne, J. R., Reed, P. M., & +Characklis, G. W. (2016). Synthetic drought scenario generation to +support bottom-up water supply vulnerability assessments. Journal of +Water Resources Planning and Management, (11), 04016050.

+

Kirsch, B. R., Characklis, G. W., & Zeff, H. B. (2013). Evaluating the +impact of alternative hydro-climate scenarios on transfer agreements: +Practical improvement for generating synthetic streamflows. Journal of +Water Resources Planning and Management, 139(4), 396-406.

+

Overpeck, J. T., & Udall, B. (2020). Climate change and the aridification of North America. Proceedings of the National Academy of Sciences, 117(22), 11856-11858.

+

Quinn, J. D., Hadjimichael, A., Reed, P. M., & Steinschneider, S. (2020). Can exploratory modeling of water scarcity vulnerabilities and robustness be scenario neutral? Earth’s Future, 8, e2020EF001650. https://doi.org/10.1029/2020EF001650

+

Williams, A. P., Cook, B. I., & Smerdon, J. E. (2022). Rapid +intensification of the emerging southwestern North American megadrought +in 2020–2021. Nature Climate Change, 12(3), 232-234.


B. Jupyter Notebook Tutorials#

+
+

B.1. Fishery Dynamics Tutorial#

+
+

Note

+
+
Run the tutorial interactively: Fishery Dynamics Notebook.
+
Please be aware that notebooks can take a couple minutes to launch.
+
To run the notebooks yourself, download the files here and use these requirements.
+
+
+
+

B.1.1. Tutorial: Sensitivity Analysis (SA) to discover factors shaping consequential dynamics#

+

This notebook demonstrates the application of sensitivity analysis to +discover factors that shape the behavior modes of a socio-ecological +system with dynamic human action.

+

The system of differential equations below represent a system of prey +(defined in the equation below as x) and predator (defined as y) fish, +with a human actor harvesting the prey fish. You can read more about +this system at Hadjimichael et +al. (2020).

+_images/eqn2.png +

The table below defines the parameters in the system and also denotes +the baseline and ranges associated with each uncertain parameter.

+_images/table1.png +

The system is simple but very rich in the dynamic behaviors it exhibits. This complexity is accompanied by the presence of several equilibria that come in and out of existence with different parameter values. The equilibria also change in their stability according to different parameter values, giving rise to different behavior modes as shown by the diverse predator and prey abundance trajectories in the figure below.

+_images/Figure_1.png +

In the unharvested system (without the human actor) the stability of +several of these equilibria can be derived analytically. The task +becomes significantly more difficult when the adaptive human actor is +introduced, deciding to harvest the system at different rates according +to their objectives and preferences.

+

Sensitivity analysis methods can help us identify the factors that most +control these dynamics by exploring the space of parameter values and +seeing how system outputs change as a result.

+

Through previously conducted optimization, there already exists a set of +potential harvesting strategies that were identified in pursuit of five +objectives:

+
    +
  • Maximize Harvesting Discounted Profits (Net Present Value)

  • +
  • Minimize Prey Population Deficit

  • +
  • Minimize Longest Duration of Consecutive Low Harvest

  • +
  • Maximize Worst Harvest Instance

  • +
  • Minimize Harvest Variance

  • +
+

The identified harvesting strategies also meet the necessary constraint +of not causing inadvertent predator collapse.

+

We will be examining the effects of parametric uncertainty on these +identified strategies, particularly focusing on two strategies: one +selected to maximize harvesting profits and one identified through +previous analysis to perform ‘well enough’ for all objectives across a +wide range of states of the world (referred to as the ‘robust’ +harvesting policy).

+
+

B.1.1.1. Let’s get started!#

+

In this tutorial, we will be loading in data that has been produced in +Hadjimichael et al. (2020). Before we start our analysis, we’ll load the +relevant Python libraries. NOTE: To step through the notebook, +execute each gray (code) box by typing “Shift+Enter”.

+
#Import necessary libraries
+
+import msdbook
+import numpy as np
+import matplotlib.pyplot as plt
+from SALib.sample import saltelli
+from SALib.analyze import sobol
+from matplotlib import patheffects as pe
+
+# load example data
+msdbook.install_package_data()
+
+%matplotlib inline
+%config InlineBackend.print_figure_kwargs = {'bbox_inches':None}
+
+
+
Downloading example data for msdbook version 0.1.5...
+Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/uncertain_params_bounds.txt
+Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/sa_metric_s1.npy
+Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/sa_vary_delta.npy
+Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/sa_by_mth_s1.npy
+Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/solutions.resultfile
+Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/3704614_heatmap.npy
+Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/LHsamples_original_1000.txt
+Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/3704614_pseudo_r_scores.csv
+Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/param_values.csv
+Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/sa_by_yr_s1.npy
+Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/sa_by_mth_delta.npy
+Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/7000550_pseudo_r_scores.csv
+Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/collapse_days.csv
+Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/hymod_params_256samples.npy
+Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/sa_vary_s1.npy
+Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/7000550_heatmap.npy
+Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/7200799_heatmap.npy
+Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/sa_by_yr_delta.npy
+Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/7200799_pseudo_r_scores.csv
+Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/LeafCatch.csv
+Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/hymod_simulations_256samples.csv
+Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/Robustness.txt
+
+
+
+
+

B.1.1.2. Step 1: Load identified solutions and explore performance#

+

Here we load in the solution set obtained in Hadjimichael et al. (2020). The solution set contains the decision variables and objectives associated with a variety of harvesting policies. For this tutorial, we focus on comparing two policies: one that maximizes harvesting profits and one that performs robustly across all objectives. Below, we read in the decision variables and objectives from an external file that can be found within the msdbook package data.

+
robustness = msdbook.load_robustness_data()
+results = msdbook.load_profit_maximization_data()
+
+robust_solution = np.argmax(robustness[:,-1]) #pick robust solution
+profit_solution = np.argmin(results[:,6]) #pick profitable solution
+objective_performance = -results[:,6:] #Retain objective values
+
+# Get decision variables for each of the policies
+highprofitpolicy = results[profit_solution,0:6]
+mostrobustpolicy = results[robust_solution,0:6]
+
+
+

Next, we plot the identified solutions with regard to their objective performance in a parallel axis plot.

+
+

Tip

+

View the source code used to create this plot here: plot_objective_performance

+
+
ax, ax1 = msdbook.plot_objective_performance(objective_performance, profit_solution, robust_solution)
+
+
+_images/fishery_output_6_0.png +

The solution set from the optimization in Hadjimichael et al. (2020) is presented in a parallel axis plot, where each of the five objectives (and one constraint) is represented as an axis. Each solution on the Pareto front is represented as a line, with the color of the line indicating the value of the NPV objective. The preference for objective values is in the upward direction; therefore, the ideal solution would be a line straight across the top of the plot that satisfies every objective. However, no such line exists because there are tradeoffs when sets of objectives are prioritized over others. When lines cross in between axes, this indicates a tradeoff between objectives (as seen in the first two axes). The solution that is most robust in the NPV objective has the highest value on the first axis and is outlined in dark gold. The solution that is most robust across all objectives is outlined in a brighter yellow. A parallel axis plot is an effective visual to characterize high-dimensional tradeoffs in the system and to visualize differences in performance across policies.

+
+
+

B.1.1.3. Step 2: Use SALib to generate a sample for a Sobol sensitivity analysis#

+

In Step 1, we showed how the optimized harvesting policies performed in +the objective space, which utilized the baseline parameters outlined in +the table above. Now, we are interested in understanding how sensitive +our two policies are to alternative states of the world that may be +characterized by different parameter values. To do so, we first need to +define the problem dictionary that allows us to generate these +alternative states of the world.

+
# Set up SALib problem
+problem = {
+  'num_vars': 9,
+  'names': ['a', 'b', 'c', 'd', 'h', 'K', 'm', 'sigmaX', 'sigmaY'],
+  'bounds': [[0.002, 2], [0.005, 1], [0.2, 1], [0.05, 0.2], [0.001, 1],
+             [100, 5000], [0.1, 1.5], [0.001, 0.01], [0.001, 0.01]]
+}
+
+
+

Then we use the following command to generate a Saltelli sample from +these defined ranges:

+
param_values = saltelli.sample(problem, 1024, calc_second_order=False)
+
+
+

Generally, it is a good idea to save the result of the sample since it +is often reused and regenerating it produces a different sample set. For +this reason, we will load one from file that was previously generated.
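For example, the sample could be written to disk and reloaded later with NumPy; this is only an illustrative sketch, and the file name below is an example rather than one used by the tutorial:

# one way to persist the sample so the exact same matrix can be reused later
np.savetxt('saltelli_param_values.txt', param_values)

# ...and reload it in a later session
param_values = np.loadtxt('saltelli_param_values.txt')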

+
# load previously generated Saltelli sample from our msdbook package data
+param_values = msdbook.load_saltelli_param_values()
+
+
+
+
+

B.1.1.4. Step 3: Evaluate the system over all generated states of the world#

+

Now we re-evaluate how well the policies do in the new states of the world. In order to characterize failure of a policy, we identify the states where the predator population collapses as an inadvertent consequence of applying the harvesting strategy under a state of the world different from the one originally assumed. Due to how long this step takes to execute within the tutorial, we will read in the solutions from an external file. However, the block of code below shows how the evaluation can be implemented (fish_game refers to the harvested predator-prey simulation function used in the original study).

+
# create array to store collapse values under both policies
+collapse_days = np.zeros([len(param_values), 2])
+
+# evaluate performance under every state
+for i in range(len(param_values)):
+
+    additional_inputs = np.append(['Previous_Prey'],
+                                  [param_values[i,0],
+                                   param_values[i,1],
+                                   param_values[i,2],
+                                   param_values[i,3],
+                                   param_values[i,4],
+                                   param_values[i,5],
+                                   param_values[i,6],
+                                   param_values[i,7],
+                                   param_values[i,8]])
+
+    collapse_days[i,0]=fish_game(highprofitpolicy, additional_inputs)[1][0]
+    collapse_days[i,1]=fish_game(mostrobustpolicy, additional_inputs)[1][0]
+
+
+
# load the simulation data from our msdbook package data
+collapse_days = msdbook.load_collapse_data()
+
+
+
+
+

B.1.1.5. Step 4: Calculate sensitivity indices#

+

Now we use a Sobol sensitivity analysis to calculate first-order and total-order sensitivity indices for each parameter and for each of the two policies. These indices help determine which factors explain the most variability in the number of days of predator population collapse.

+
#Perform the Sobol SA for the profit-maximizing solution
+Si_profit = sobol.analyze(problem, collapse_days[:, 0],
+                          calc_second_order=False,
+                          conf_level=0.95,
+                          print_to_console=True)
+
+
+
#Perform the Sobol SA for the robust solution
+Si_robustness = sobol.analyze(problem,
+                              collapse_days[:, 1],
+                              calc_second_order=False,
+                              conf_level=0.95,
+                              print_to_console=True)
+
+
+
              ST   ST_conf
+a       0.226402  0.036146
+b       0.066819  0.013347
+c       0.004395  0.004023
+d       0.024509  0.006993
+h       0.009765  0.005488
+K       0.020625  0.009494
+m       0.897971  0.066470
+sigmaX  0.000136  0.000149
+sigmaY  0.000739  0.001040
+              S1   S1_conf
+a       0.087936  0.044236
+b       0.000554  0.021474
+c      -0.002970  0.004590
+d       0.001206  0.015881
+h       0.004554  0.007998
+K       0.003843  0.012661
+m       0.751301  0.071862
+sigmaX -0.000325  0.001245
+sigmaY -0.001887  0.002768
+
+
+

Looking at the total-order indices (ST), factors \(m\), \(a\), \(b\), \(d\) and \(K\) explain a non-negligible amount of variance and therefore have an effect on the stability of this system. Looking at the first-order indices (S1), we also see that, besides factors \(m\) and \(a\), all other factors act on this system primarily through their interactions, which make up the difference between their S1 and ST indices. This shows the danger of limiting sensitivity analyses to first-order effects, as factor importance might be significantly misjudged.
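The size of these interaction effects can be read directly off the results by subtracting each factor's first-order index from its total-order index; a small illustrative snippet using the Si_robustness result computed above:

# interaction contribution of each factor, approximated as ST - S1
interactions = Si_robustness['ST'] - Si_robustness['S1']
for name, value in zip(problem['names'], interactions):
    print(f"{name}: {value:.3f}")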

+

These findings are supported by the analytical condition of equilibrium +stability in this system:

+_images/eqn4.png +

In an unharvested system, this condition is both necessary and +sufficient for the equilibrium of the two species coexisting to be +stable.

+

When adaptive human action is introduced, however, this condition is still necessary but no longer sufficient, as harvesting reduces the number of prey fish and, as a result, the resources available to the predator fish. Since this harvesting value is not constant, but can dynamically adapt according to the harvester’s objectives, it cannot be introduced into this simple equation.

+
+
+

B.1.1.6. Step 5: Explore relationship between uncertain factors and performance#

+

In the following steps, we will use the results of our sensitivity +analysis to investigate the relationships between parametric +uncertainty, equilibrium stability and the performance of the two +policies.

+

We can use the top three factors identified (\(m\), \(a\), and +\(b\)) to visualize the performance of our policies in this +three-dimensional parametric space.

+

We first define the stability condition, as a function of \(b\) and +\(m\), and calculate the corresponding values of \(a\).

+
def inequality(b, m, h, K):
+    return ((b**m)/(h*K)**(1-m))
+
+# boundary interval that separates successful and failed states of the world
+b = np.linspace(start=0.005, stop=1, num=1000)
+m = np.linspace(start=0.1, stop=1.5, num=1000)
+h = np.linspace(start=0.001, stop=1, num=1000)
+K = np.linspace(start=100, stop=2000, num=1000)
+b, m = np.meshgrid(b, m)
+a = inequality(b, m, h, K)
+a = a.clip(0,2)
+
+
+
+

Tip

+

View the source code used to create this plot here: plot_factor_performance

+
+
# generate plot
+ax1, ax2 = msdbook.plot_factor_performance(param_values, collapse_days, b, m, a)
+
+
+_images/fishery_output_22_0.png +

These figures show the combinations of factors that lead to success or failure in different states of the world for the profit-maximizing and robust policies. Each point is a state of the world, characterized by specific values of the parameters; ideally, we would like each point to be blue, representing a low number of days with a predator collapse in that world. The gray curve denotes the highly non-linear boundary, defined by the stability condition, that separates successful and failed states of the world. The figures demonstrate the following key points:

+

First, as asserted above, the policies interact with the system in +different and complex ways. In the presence of human action, the +stability condition is not sufficient in determining whether the policy +will succeed, even though it clearly shapes the system in a fundamental +manner.

+

Secondly, the robust policy manages to avoid collapse in many more of the sampled states of the world, indicated by the number of blue points. The robust policy avoids collapse in 31% of worlds versus 14% for the profit-maximizing policy. This presents a clear tradeoff between profit-maximizing performance and robustness against uncertainty.

+
+
+

B.1.1.7. Tips to Apply Sobol SA and Scenario Discovery to your Problem#

+

In this tutorial, we demonstrated a Sobol SA to identify the most important factors driving the behavior of a system (i.e., the number of collapse days). In order to apply this methodology to your problem, you will need to have a set of optimized policies for your system that you are interested in analyzing. The general workflow is as follows:

+
    +
  1. Choose sampling bounds for your parameters and set up the problem dictionary as in Step 2 above.

  2. Generate samples, or alternative states of the world using the saltelli.sample function.

  3. Evaluate your policies on the alternative states of the world. For your application, you will also need to develop a rule for determining success or failure of your policy in a new SOW. In this tutorial, success was denoted by a small number of collapse days. Ultimately, the rule will be specific to your application and can include various satisficing criteria.

  4. Calculate the Sobol indices and discover the most important parameters driving success and failure.

  5. Finally, use a similar plotting procedure as in Step 5 to identify the combination of parameter values that lead to success and failure in the system.
+
+
+
+
+

B.2. Sobol SA Tutorial#

+
+

Note

+
+
Run the tutorial interactively: Sobol SA Tutorial.
+
Please be aware that notebooks can take a couple minutes to launch.
+
To run the notebooks yourself, download the files here and use these requirements.
+
+
+
+

B.2.1. Tutorial: Sensitivity Analysis (SA) using the Saltelli sampling scheme with Sobol SA#

+

In this tutorial, we will set up a workflow to investigate how sensitive the output of a function is to its inputs. Why might you want to do this? Imagine that this function represents a complex system, such as the rainfall-runoff process of a watershed model, and that you, the researcher, want to investigate how your choice of input parameter values is affecting the model’s characterization of runoff in the watershed. Your parameter values are likely uncertain and can take on any value in a pre-defined range. Using a Sobol SA will allow you to sample different values of your parameters and calculate how sensitive your output of interest is to certain parameters. Below, we demonstrate Sobol SA for a simple function to illustrate the method, but the workflow can be applied to your own problem of interest!

+

In order to conduct this analysis, we will use the popular Python +Sensitivity Analysis Library +(SALib) to:

+
    +
  1. Generate a problem set as a dictionary for our Ishigami function that has three inputs

  2. Generate 2048 samples for our problem set using the Saltelli [1] [2] sampling scheme

  3. Execute the Ishigami function for each of our samples and gather the outputs

  4. Compute the sensitivity analysis to generate first-order and total-order sensitivity indices using the Sobol [3] method

  5. Interpret the meaning of our results
+
+

B.2.1.1. Let’s get started!#

+

NOTE: Content from this tutorial is taken directly from the SALib +“Basics” +walkthrough. To step through the notebook, execute each gray (code) box +by typing “Shift+Enter”.

+
#Import relevant libraries
+import numpy as np
+import matplotlib.pyplot as plt
+from mpl_toolkits import mplot3d
+
+from SALib.sample import saltelli
+from SALib.analyze import sobol
+from SALib.test_functions import Ishigami
+
+
+
+
+

B.2.1.2. Step 1: Generate the problem dictionary#

+

The Ishigami function is of the form:

+
+\[f(x_1,x_2,x_3) = \sin(x_1) + a\sin^2(x_2) + b x_3^4 \sin(x_1)\]
+

The function has three inputs, 𝑥1, 𝑥2, 𝑥3 where 𝑥𝑖 ∈ [−𝜋, 𝜋]. The +constants \(a\) and \(b\) are defined as 7.0 and 0.1 +respectively.
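For reference, the Ishigami function can also be written out directly; this small sketch is equivalent in form to SALib's built-in wrapper used later, with a = 7.0 and b = 0.1:

import numpy as np

def ishigami(x1, x2, x3, a=7.0, b=0.1):
    """Ishigami test function: sin(x1) + a*sin(x2)**2 + b*x3**4*sin(x1)."""
    return np.sin(x1) + a * np.sin(x2) ** 2 + b * x3 ** 4 * np.sin(x1)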

+
#Create a problem dictionary. Here we supply the number of variables, the names of each variable, and the bounds of the variables.
+problem = {
+    'num_vars': 3,
+    'names': ['x1', 'x2', 'x3'],
+    'bounds': [[-3.14159265359, 3.14159265359],
+               [-3.14159265359, 3.14159265359],
+               [-3.14159265359, 3.14159265359]]
+}
+
+
+
+
+

B.2.1.3. Step 2: Generate samples using the Saltelli sampling scheme#

+

Sobol SA requires the use of the Saltelli sampling scheme. The output of +the saltelli.sample function is a NumPy array that is of shape 2048 +by 3. The sampler generates 𝑁∗(2𝐷+2) samples, where in this example, N +is 256 (the argument we supplied) and D is 3 (the number of model +inputs), yielding 2048 samples. The keyword argument +calc_second_order=False will exclude second-order indices, resulting +in a smaller sample matrix with 𝑁∗(𝐷+2) rows instead. Below, we plot the +resulting Saltelli sample.

+
#Generate parameter values using the saltelli.sample function
+param_values = saltelli.sample(problem, 256)
+
+print(f"`param_values` shape:  {param_values.shape}")
+
+
+
param_values shape:  (2048, 3)
+
#Plot the 2048 samples of the parameters
+
+fig = plt.figure(figsize = (7, 5))
+ax = plt.axes(projection ="3d")
+ax.scatter3D(param_values[:,0], param_values[:,1], param_values[:,2])
+ax.set_xlabel('X1 Parameter')
+ax.set_ylabel('X2 Parameter')
+ax.set_zlabel('X3 Parameter')
+plt.title("Saltelli Sample of Parameter Values")
+
+plt.show()
+
+
+_images/output_7_0.png +
+
+

B.2.1.4. Step 3: Execute the Ishigami function over our sample set#

+

SALib provides a convenient wrapper to the Ishigami function that allows the user to pass the param_values array we just generated directly into the function.

+
Y = Ishigami.evaluate(param_values)
+
+
+
+
+

B.2.1.5. Step 4: Compute first-, second-, and total-order sensitivity indices using the Sobol method#

+

The sobol.analyze function will use our problem dictionary and the result of the Ishigami runs (Y) to compute first-, second-, and total-order indices.

+
Si = sobol.analyze(problem, Y)
+
+
+

Si is a Python dict with the keys “S1”, “S2”, “ST”, “S1_conf”, +“S2_conf”, and “ST_conf”. The _conf keys store the corresponding +confidence intervals, typically with a confidence level of 95%. Use the +keyword argument print_to_console=True to print all indices. Or, we +can print the individual values from Si as shown in the next step.
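An illustrative look at the structure of this dictionary (the key order shown in the comment is an example of what you might see):

# the analysis result behaves like a dictionary of NumPy arrays
print(list(Si.keys()))  # e.g. ['S1', 'S1_conf', 'ST', 'ST_conf', 'S2', 'S2_conf']
print(Si['S1_conf'])    # confidence interval width for each first-order index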

+
+
+

B.2.1.6. Step 5: Interpret our results#

+

We execute the following code and take a look at our first-order indices (S1) for each of our three inputs. These indices can be interpreted as the fraction of variance in the output that is explained by each input individually.

+
first_order = Si['S1']
+
+print('First-order:')
+print(f"x1: {first_order[0]}, x2: {first_order[1]}, x3: {first_order[2]}")
+
+
+
First-order:
+x1: 0.3184242969763115, x2: 0.4303808201623416, x3: 0.022687722804980225
+
+
+

If we were to rank the importance of the inputs based on how much output variance they each explain individually, we would rank them from greatest to least importance as follows: 𝑥2, 𝑥1, and then 𝑥3. Since 𝑥3 only explains about 2% of the output variance on its own, 𝑥2 and 𝑥1 contribute meaningfully to the output variance independently, whereas 𝑥3’s individual contribution is negligible. Determining which inputs are most important, or what index value is meaningful, is a common question, but one for which there is no general rule or threshold. The answer is problem- and context-dependent, but procedures have been developed to rank-order influential inputs and to identify the least influential factors, which can then be fixed to simplify the model [4] [5] [6].

+

Next, we evaluate the total-order indices, which measure the +contribution to the output variance caused by varying the model input, +including both its first-order effects (the input varying alone) and all +higher-order interactions across the input parameters.

+
total_order = Si['ST']
+
+print('Total-order:')
+print(f"x1: {total_order[0]}, x2: {total_order[1]}, x3: {total_order[2]}")
+
+
+
Total-order:
+x1: 0.5184119098161343, x2: 0.41021260250026054, x3: 0.2299058431439953
+
+
+

The magnitudes of the total-order indices are substantially larger than those of the first-order indices, which reveals that higher-order interactions are occurring, i.e., that interactions across inputs also explain some of the total variance in the output. Note that 𝑥3 has a non-negligible total-order index: it is not a consequential parameter when considered in isolation, but it becomes consequential through its interactions with 𝑥1 and 𝑥2, explaining roughly 23% of the variance in the output.

+

Finally, we can investigate these higher-order interactions by viewing the second-order indices. The second-order indices measure the contribution to the output variance caused by the interaction between any two model inputs. Some computing error can appear in these sensitivity indices, such as negative values. Typically, these computing errors shrink as the number of samples increases.

+
second_order = Si['S2']
+
+print("Second-order:")
+print(f"x1-x2:  {second_order[0,1]}")
+print(f"x1-x3:  {second_order[0,2]}")
+print(f"x2-x3:  {second_order[1,2]}")
+
+
+
Second-order:
+x1-x2:  -0.043237389723234154
+x1-x3:  0.17506452088709862
+x2-x3:  -0.03430682392607577
+
+
+

We can see that there are strong interactions between 𝑥1 and 𝑥3. Note +that in the Ishigami function, these two variables are multiplied in the +last term of the function, which leads to interactive effects. If we +were considering first order indices alone, we would erroneously assume +that 𝑥3 explains no variance in the output, but the second-order and +total order indices reveal that this is not the case. It’s easy to +understand where we might see interactive effects in the case of the +simple Ishigami function. However, it’s important to remember that in +more complex systems, there may be many higher-order interactions that +are not apparent, but could be extremely consequential in explaining the +variance of the output.

+
+
+

B.2.1.7. Tips to Apply Sobol SA to Your Own Problem#

+

In this tutorial, we demonstrated how to apply an SA analysis to a +simple mathematical test function. In order to apply a Sobol SA to your +own problem, you will follow the same general workflow that we defined +above. You will need to:

+
    +
  1. Choose sampling bounds for your parameters and set up the problem dictionary as in Step 1 above.

  2. Generate samples using the saltelli.sample function. This step is problem-dependent, and note that the Sobol method can be computationally intensive depending on the model being analyzed. For example, for a simple rainfall-runoff model such as HYMOD, it has been recommended to run a sample size of at least N = 10,000 (which translates to 60,000 model runs). More complex models will be slower to run and will also require more samples to calculate accurate estimates of Sobol indices. Once you complete this process, pay attention to the confidence bounds on your sensitivity indices to see whether you need to run more samples.

  3. Run the parameter sets through your model. In the example above, the Ishigami function could be evaluated through SALib since it is a built-in function. For your application, you will need to run these parameter sets through the problem externally and save the output. The output file should contain one row of output values for each model run.

  4. Calculate the Sobol indices. Now, Y will be a numpy array with your external model output and you will need to include the parameter samples as an additional argument.

  5. Finally, we interpret the results. If the confidence intervals of your dominant indices are larger than roughly 10% of the value itself, you may want to consider increasing your sample size as computation permits. You should additionally read the references noted in Step 5 above to understand more about identifying important factors.
+

References

+ +
+
+
+
+

B.3. Logistic Regression Tutorial#

+
+

Note

+
+
Run the tutorial interactively: Logistic Regression Tutorial.
+
Please be aware that notebooks can take a couple minutes to launch.
+
To run the notebooks yourself, download the files here and use these requirements.
+
+
+
+

B.3.1. Tutorial: Logistic Regression for Factor Mapping#

+

This tutorial replicates a scenario discovery analysis performed in +Hadjimichael et +al. (2020).

+
+

B.3.1.1. Background#

+

Planners in the Upper Colorado River Basin (UCRB, shown in the figure below) are seeking to understand the vulnerability of water users to uncertainties stemming from climate change, population growth and water policy changes. The UCRB spans 25,682 km2 in western Colorado and is home to approximately 300,000 residents and 1,012 km2 of irrigated land. Several thousand irrigation ditches divert water from the main river and its tributaries for irrigation (shown as small black dots in the figure). Transmountain diversions of approximately 567,400,000 m3 per year are exported for irrigation, industrial and municipal uses in northern and eastern Colorado, serving the major population centers of Denver and Colorado Springs. These diversions are carried through tunnels, shown as large black dots in the figure.

+_images/basin_map.png +

An important planning consideration is the water rights of each user, +defined by seniority across all water uses (irrigation diversions, +transboundary diversions, power plants etc.) in the basin. To assess the +vulnerability of users with varying degrees of water rights seniority, +planners simulate the system across an ensemble of scenarios using the +state of Colorado’s StateMod platform. The model simulates streamflow, +diversions, instream demands, and reservoir operations.

+

Hadjimichael et al. (2020) employ an exploratory analysis by simulating a large ensemble of plausible scenarios using StateMod and then identifying consequential, decision-relevant combinations of uncertain factors, a process termed scenario discovery. Focusing on decision-relevant metrics (metrics that are important to the user), scenario discovery is applied to the water shortages experienced by each individual user (i.e., not to a single basin-wide or sector-wide metric). For this training example, we’ll be performing scenario discovery for three different water users: two irrigation users and one municipal user.

+
+
+

B.3.1.2. Let’s get started!#

+

In this tutorial, we will be loading in data that has been produced in +Hadjimichael et al. (2020). Before we start our analysis, we’ll load the +relevant Python libraries, example data, and information for the three +users.

+
#import libraries
+import msdbook
+import numpy as np
+import pandas as pd
+import matplotlib as mpl
+import matplotlib.pyplot as plt
+
+# load example data from Hadjimichael et al. (2020)
+msdbook.install_package_data()
+
+# Select the IDs for the three users that we will perform the analysis for
+all_IDs = ['7000550','7200799','3704614']
+usernames = ['Medium seniority irrigation',
+             'Low seniority irrigation',
+             'Transbasin municipal diversion']
+nStructures = len(all_IDs)
+
+
+
Downloading example data for msdbook version 0.1.5...
+Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/uncertain_params_bounds.txt
+Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/sa_metric_s1.npy
+Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/sa_vary_delta.npy
+Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/sa_by_mth_s1.npy
+Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/solutions.resultfile
+Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/3704614_heatmap.npy
+Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/LHsamples_original_1000.txt
+Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/3704614_pseudo_r_scores.csv
+Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/param_values.csv
+Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/sa_by_yr_s1.npy
+Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/sa_by_mth_delta.npy
+Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/7000550_pseudo_r_scores.csv
+Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/collapse_days.csv
+Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/hymod_params_256samples.npy
+Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/sa_vary_s1.npy
+Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/7000550_heatmap.npy
+Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/7200799_heatmap.npy
+Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/sa_by_yr_delta.npy
+Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/7200799_pseudo_r_scores.csv
+Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/LeafCatch.csv
+Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/hymod_simulations_256samples.csv
+Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/Robustness.txt
+
+
+
+
+

B.3.1.3. Step 1: Load Latin hypercube sample and set up problem#

+

To examine regional vulnerability, we generate an ensemble of plausible +future states of the world (SOWs) using Latin Hypercube Sampling. For +this tutorial, we’ll load a file containing 1,000 samples across 14 +parameters. The sampled parameters encompass plausible changes to the +future state of the basin, including changes to hydrology, water demands +(irrigation, municipal & industry, transbasin), and institutional and +environmental factors (environmental flows, reservoir storage, operation +of the Shoshone Power Plant). These samples are taken from ranges +identified in param_bounds. Below we load in the 1000 samples, the +range of values that the samples can take for each parameter, and the +parameter names. More information on what each parameter constitutes can +be found in Table 1 of Hadjimichael et al., 2020.

+
#Identify the bounds for each of the 14 parameters
+param_bounds = msdbook.load_basin_param_bounds()
+
+#Load in the parameter samples
+LHsamples = msdbook.load_lhs_basin_sample()
+
+#Create an array of the parameter names
+param_names=['Irrigation demand multiplier','Reservoir loss','Transbasin demand multiplier',
+             'Municipal & industrial multiplier', 'Shoshone','Environmental flows',
+             'Evaporation change','Mean dry flow','Dry flow variance',
+             'Mean wet flow','Wet flow variance','Dry-dry probability',
+             'Wet-wet probability', 'Snowmelt shift']
+
+
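The 1,000-member sample used here was pre-generated, but a comparable Latin hypercube sample could be drawn with SALib's latin sampler. The sketch below is illustrative only and assumes param_bounds is a 14-by-2 array of lower and upper bounds:

from SALib.sample import latin

# illustrative only: the tutorial uses the pre-generated LHsamples loaded above
lhs_problem = {
    'num_vars': len(param_names),
    'names': param_names,
    'bounds': [list(bound) for bound in param_bounds]
}
new_LHsamples = latin.sample(lhs_problem, 1000)  # shape (1000, 14)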
+
+
+

B.3.1.4. Step 2: Define decision-relevant metrics for illustration#

+

Scenario discovery attempts to identify parametric regions that lead to +‘success’ and ‘failure’. For this demonstration we’ll be defining +‘success’ as states of the world where a shortage level doesn’t exceed +its historical frequency.
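As a purely illustrative sketch of such a rule, with hypothetical arrays standing in for the simulated and historical shortage frequencies:

import numpy as np

# hypothetical values: frequency of a given shortage level in each sampled SOW
# versus the frequency of that shortage level in the historical record
shortage_frequency = np.array([0.12, 0.30, 0.05])
historical_frequency = 0.20

# success = 1 where the shortage level is no more frequent than it was historically
success = (shortage_frequency <= historical_frequency).astype(int)
print(success)  # [1 0 1]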

+
+
+

B.3.1.5. Step 3: Run the logistic regression#

+

Logistic regression estimates the probability that a future SOW will be +classified as a success or failure given a set of performance criteria. +A logistic regression model is defined by:

+
+\[ln \bigg (\frac{p_i}{1-p_i} \bigg ) = X^T_i \beta\]
+

where \(p_i\) is the probability the performance in the +\(i^{th}\) SOW will be classified as a success, \(X_i\) is the +vector of covariates describing the \(i^{th}\) SOW, and +\(\beta\) is the vector of coefficients describing the relationship +between the covariates and the response, which here will be estimated +using maximum likelihood estimation.

+

A logistic regression model was fit to the ensemble of SOWs using the +performance criteria defined in step 2. Logistic regression modeling was +conducted using the Statsmodel +Python package. The +data required for the full analysis is too large to include in this +tutorial, but results can be found in the data file loaded below.
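For reference, fitting such a model directly with statsmodels might look like the sketch below. Here dta and predictor_list are placeholders for the dataframe of covariates (with a binary Success column) and the chosen predictor names used later in Step 5; the tutorial itself wraps this step in msdbook.fit_logit:

import statsmodels.api as sm

# sketch only: build the design matrix from the chosen predictors and fit a logit model
exog = sm.add_constant(dta[predictor_list])
logit_model = sm.Logit(dta['Success'], exog)
result = logit_model.fit()
print(result.summary())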

+

The results files contain the occurrence of different shortage frequency and magnitude combinations under the experiment, in increments of 10, between 0 and 100. These combinations (100 for each user) are alternative decision-relevant metrics that can be used for scenario discovery.

+
# Set arrays for shortage frequencies and magnitudes
+frequencies = np.arange(10, 110, 10)
+magnitudes = np.arange(10, 110, 10)
+realizations = 10
+
+# Load shortage performance (heatmap arrays) for each of the users
+results = [msdbook.load_user_heatmap_array(all_IDs[i]) / 100 for i in range(len(all_IDs))]
+
+
+
+
+

B.3.1.6. Step 4: Factor ranking#

+

To rank the importance of each uncertain factor, we utilize McFadden’s pseudo-R2, a measure that quantifies the improvement in the model when a given predictor is included, compared to a model that only predicts the mean of the data set:

+
+\[R^2_{McFadden}=1-\frac{ln \hat{L}(M_{full})}{ln \hat{L}(M_{intercept})}\]
+

Here \(ln \hat{L}(M_{full})\) is the log likelihood of the full +model (including the predictor) and \(ln \hat{L}(M_{intercept})\) is +the log likelihood of the intercept model (which predicts the mean +probability of success across all SOWs).

+

Higher values of McFadden’s pseudo-R2 indicate higher factor importance (as the full model fits better, its log likelihood approaches zero, so the ratio of the two log likelihoods becomes small and the pseudo-R2 approaches one).
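With a fitted statsmodels result in hand (as in the sketch in Step 3 above), McFadden's pseudo-R2 can be computed directly from the two log likelihoods; statsmodels also exposes it as result.prsquared. A minimal sketch:

# llf is the log likelihood of the full model, llnull that of the intercept-only model
mcfadden_r2 = 1 - result.llf / result.llnull
print(mcfadden_r2)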

+
#Load the pseudo-R^2 scores
+scores = [msdbook.load_user_pseudo_scores(all_IDs[i]) for i in range(len(all_IDs))]
+
+# Select indices of frequency and magnitudes that will be used for the visualization
+freq = [1,0,0]
+mag = [7,3,7]
+
+
+
+
+

B.3.1.7. Step 5: Draw factor maps#

+

The McFadden pseudo-R2 score files contain preliminary logistic regression results on parameter importance for each of these combinations. Using these pseudo-R2 scores, we will identify the two most important factors for each metric, which we’ll use to generate the final scenario discovery maps (note: there may be more than two important factors for each user, but here we demonstrate by mapping only two).

+
# setup figure
+fig, axes = plt.subplots(3,1, figsize=(6,18), tight_layout=True)
+fig.patch.set_facecolor('white')
+
+for i in range(len(axes.flat)):
+
+    ax = axes.flat[i]
+
+    allSOWsperformance = results[i]
+    all_pseudo_r_scores = scores[i]
+
+    # construct dataframe
+    dta = pd.DataFrame(data=np.repeat(LHsamples, realizations, axis = 0), columns=param_names)
+    dta['Success'] = allSOWsperformance[freq[i],mag[i],:]
+
+    pseudo_r_scores = all_pseudo_r_scores[str(frequencies[freq[i]])+'yrs_'+str(magnitudes[mag[i]])+'prc'].values
+    top_predictors = np.argsort(pseudo_r_scores)[::-1][:2] #Sort scores and pick top 2 predictors
+
+    # define color map for dots representing SOWs in which the policy
+    # succeeds (light blue) and fails (dark red)
+    dot_cmap = mpl.colors.ListedColormap(np.array([[227,26,28],[166,206,227]])/255.0)
+
+    # define color map for probability contours
+    contour_cmap = mpl.cm.get_cmap('RdBu')
+
+    # define probability contours
+    contour_levels = np.arange(0.0, 1.05,0.1)
+
+    # define base values of the predictors
+    SOW_values = np.array([1,1,1,1,0,0,1,1,1,1,1,0,0,0]) # default parameter values for base SOW
+    base = SOW_values[top_predictors]
+    ranges = param_bounds[top_predictors]
+
+    # define grid of x (1st predictor), and y (2nd predictor) dimensions
+    # to plot contour map over
+    xgrid = np.arange(param_bounds[top_predictors[0]][0],
+                      param_bounds[top_predictors[0]][1], np.around((ranges[0][1]-ranges[0][0])/500,decimals=4))
+    ygrid = np.arange(param_bounds[top_predictors[1]][0],
+                      param_bounds[top_predictors[1]][1], np.around((ranges[1][1]-ranges[1][0])/500,decimals=4))
+    all_predictors = [ dta.columns.tolist()[i] for i in top_predictors]
+    dta['Interaction'] = dta[all_predictors[0]]*dta[all_predictors[1]]
+
+    # logistic regression here
+    predictor_list = [all_predictors[i] for i in [0,1]]
+    result = msdbook.fit_logit(dta, predictor_list)
+
+    # plot contour map
+    contourset = msdbook.plot_contour_map(ax, result, dta, contour_cmap,
+                                          dot_cmap, contour_levels, xgrid,
+                                          ygrid, all_predictors[0], all_predictors[1], base)
+
+    ax.set_title(usernames[i])
+
+# set up colorbar
+cbar_ax = fig.add_axes([0.98, 0.15, 0.05, 0.7])
+cbar = fig.colorbar(contourset, cax=cbar_ax)
+cbar_ax.set_ylabel('Probability of Success', fontsize=16)
+cbar_ax.tick_params(axis='y', which='major', labelsize=12)
+
+
+
/srv/conda/envs/notebook/lib/python3.7/site-packages/statsmodels/base/model.py:127: ValueWarning: unknown kwargs ['disp']
+  warnings.warn(msg, ValueWarning)
+
+
+
Optimization terminated successfully.
+         Current function value: 0.378619
+         Iterations 8
+Optimization terminated successfully.
+         Current function value: 0.397285
+         Iterations 8
+Optimization terminated successfully.
+         Current function value: 0.377323
+         Iterations 8
+
+
+_images/notebook_logistic_output_11_1.png +

The figure above demonstrates how different combinations of the +uncertain factors lead to success or failure in different states of the +world, which are denoted by the blue and red dots respectively. The +probability of success and failure are further denoted by the contours +in the figure. Several insights can be drawn from this figure.

+

First, using metrics chosen to be decision-relevant (specific to each +user) causes different factors to be identified as most important by +this scenario-discovery exercise (the x- and y-axes for each of the +subplots). In other words, depending on what the decision makers of this +system want to prioritize they might choose to monitor different +uncertain factors to track performance.

+

Second, in the top panel, the two identified factors appear to also have +an interactive effect on the metric used (shortages of a certain level +and frequency in this example). In terms of scenario discovery, the +Patient Rule Induction Method (PRIM) or Classification And Regression +Trees (CART) would not be able to delineate this non-linear space and +would therefore misclassify parameter combinations as ‘desirable’ when +they were in fact undesirable, and vice versa.

+

Lastly, logistic regression also produces contours of probability of +success, i.e. different factor-value combinations are assigned different +probabilities that a shortage level will be exceeded. This allows the +decision makers to evaluate these insights while considering their risk +aversion.

+
+
+

B.3.1.8. Tips to Apply Scenario Discovery to Your Own Problem#

+

In this tutorial, we demonstrated how to perform a scenario discovery analysis for three different users in the UCRB. The analysis allowed us to determine which parameters each user is most affected by and to visualize how different ranges of these parameters lead to success and failure for different users. This framework is applicable to any other problem where it is of interest to characterize success and failure based on uncertain parameter ranges. In order to apply the same framework to your own problem:

+
    +
  1. Choose sampling bounds for your parameters of interest, which will represent uncertainties that characterize your system.

  2. Generate samples for these parameters (this can be done using the saltelli.sample function or externally).

  3. Define what constitutes success and failure in your problem. In this tutorial, success was defined based on not surpassing the historical drought frequency. Choose a metric that is relevant to your problem and the decision-makers that might be involved. If your model involves an optimization, you can also define metrics based on meeting certain values of these objectives.

  4. Run the parameter sets through your model and calculate success and failure based on your metrics and across different users if applicable. This step will allow you to create the scatter plot part of the final figure.

  5. If it is of interest, the contours on the figure can be created by fitting the logistic regression model in a similar manner as denoted in Steps 3 and 5 of the tutorial.
+
+
+
+
+

B.4. HYMOD Dynamics Tutorial#

+
+

Note

+
+
Run the tutorial interactively: HYMOD Notebook.
+
Please be aware that notebooks can take a couple minutes to launch.
+
To run the notebooks yourself, download the files here and use these requirements.
+
+
+
+

B.4.1. Tutorial: Sensitivity Analysis of the HYMOD Model#

+

The purpose of this tutorial is to demonstrate the global sensitivity +analysis concepts and tools established in the Section 3.1 of the main +text of this eBook. This demonstration will highlight the central role +of design of experiments (Section 3.3), when implementing global +sensitivity analysis tools described in Section 3.4.

+

We’ll explore these tools and concepts using the HYdrological MODel (HYMOD), a rainfall-runoff model developed and used for river flow forecasting. HYMOD was chosen for this demonstration because it is designed to abstract highly complex and non-linear systems. The methods demonstrated in this tutorial can be applied to numerical models that simulate other complex non-linear systems.

+

This tutorial will first introduce HYMOD and use it to simulate streamflows in a river basin. Next, we’ll employ sensitivity analysis concepts described in Section 3 of the main text to examine how values of HYMOD’s parameters impact streamflow predictions. We’ll then explore how the effects of these parameters may change over time using time-varying sensitivity analysis. Finally, we’ll demonstrate concepts presented in Chapter 7 through two ensemble-based methods of uncertainty quantification - Generalized Likelihood Uncertainty Estimation (GLUE) and Pre-Calibration.

+

The tutorial includes the following steps:

+

1.1 - Introduction to a simple hydrologic model (HYMOD)

+

1.2 - Input Data

+

1.3 - Running a basic simulation

+

1.4 - Model outputs

+

2.1 - Design of Experiments

+

2.2 - Sensitivity analysis for one output

+

2.3 - Sensitivity analysis across multiple outputs

+

2.4 - Time-varying sensitivity analysis

+
+
+

B.4.2. 1 - Introduction to HYMOD#

+

1.1 Overview

+

HYMOD is a hydrologic model (rainfall-runoff model) that simulates key hydrologic fluxes such as infiltration, streamflow and evapotranspiration. The model was originally developed and used for river flow forecasting, but it has also been used to explore different sensitivity analysis (e.g., Herman et al., 2013), uncertainty quantification (e.g., Smith et al., 2008), and optimization (e.g., Ye et al., 2014) concepts.

+

HYMOD accepts two inputs - daily precipitation and daily potential evapotranspiration (PET) - and generates predictions of daily streamflow. HYMOD abstracts the highly non-linear process of runoff routing by dividing the flow into two components: quick flow, representing precipitation that quickly runs off the surface of the watershed into the stream, and slow flow, which moves through the soil and takes much longer to arrive at the stream.

+

To generate streamflow predictions, HYMOD first models vertical +processes within the watershed to determine how much water infiltrates +and evaporates from the soil at a given time step. It then determines +how much water should be partitioned into quick flow and slow flow +processes. Within each process it abstracts residence time (the time it +takes a unit volume of water to move through the watershed and into the +stream) using a series of “reservoirs” each with a calibrated residence +time.

+

HYMOD’s representation of hydrologic processes is shown in Figure 1 below and is controlled by the following parameters:

+

\(H_{uz}\): the maximum water storage capacity of the soil (mm)

+

\(B_{exp}\): parameter describing the degree of spatial variability in soil water storage capacity within the basin (capacities vary between 0 and \(H_{uz}\))

+

\(Alp\): Fraction of runoff contributing to quick flow

+

\(K_q\): Quick flow residence time of linear infinite reservoir (the +Kq values of all three linear reservoirs are the same)

+

\(K_s\): Slow flow residence time of linear infinite reservoir

+_images/hymod_schematic-DAVE.png +

HYMOD models the fraction of water that is stored in the soil +\((F(XH_{uz}))\) using the following relationship:

+
+\[F(XH_{uz}) = 1 - (1 - \frac{XH_{uz}}{H_{uz}})^{B}\]
+

where \(XH_{uz}\) is the water storage capacity of the soil; +\(H_{uz}\) is the parameter describing basin maximum water +storage capacity (mm); and \(B\) is the parameter describing the +degree of spatial variability within the basin.

+

The portion of precipitation that exceeds the water storage capacity is +treated as runoff.

+

To route runoff to streamflow, the excess runoff from the vertical processes is split into quick flow and slow flow. The proportion of runoff partitioned into quick flow and slow flow is determined by a parameter \(Alp\), which ranges between 0 and 1. Quick flow is routed through \(N\) identical quick flow tanks \(Q1, Q2... QN\) (shown above as \(N=3\)). The rate at which runoff moves through the quick flow system is described by the residence time of the quick flow tanks, \(Kq\) (day). Slow flow is routed through a parallel slow flow tank, and the rate at which slow flow is routed is described by the slow flow residence time, \(Ks\) (day).

+

Citation: Wagener, T., Boyle, D. P., Lees, M. J., Wheater, H. S., Gupta, +H. V., & Sorooshian, S. (2001). A framework for development and +application of hydrological models. Hydrology and Earth System Sciences, +5(1), 13-26.

+

1.2 Input data

+

The HYMOD model only requires precipitation and potential evapotranspiration as inputs. For this example, we’ll run HYMOD using data from the Leaf River, a humid catchment located north of Collins, Mississippi, that has been widely used to explore HYMOD. The dataset also includes daily observed runoff that we later use to evaluate the performance of each sensitivity analysis sample set.

+

In the following section of code, we’ll load the necessary python +libraries and read in the input file. For this exercise we’ll only use +the first eleven years of data. The first five rows of the input dataset +are printed to show what they look like:

+
import msdbook
+
+import numpy as np
+import pandas as pd
+import seaborn as sns
+
+from sklearn import metrics
+from matplotlib import pyplot as plt
+
+# load example data
+msdbook.install_package_data()
+
+# load the Leaf River HYMOD input file
+leaf_data = msdbook.load_hymod_input_file()
+
+# extract the first eleven years of data
+leaf_data = leaf_data.iloc[0:4015].copy()
+
+print('Leaf River Data structure:')
+
+# There are only three columns in the file including precipitation, potential evapotranspiration and  streamflow
+leaf_data.head()
+
+
+
Downloading example data for msdbook version 0.1.5...
+Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/uncertain_params_bounds.txt
+Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/sa_metric_s1.npy
+Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/sa_vary_delta.npy
+Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/sa_by_mth_s1.npy
+Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/solutions.resultfile
+Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/3704614_heatmap.npy
+Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/LHsamples_original_1000.txt
+Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/3704614_pseudo_r_scores.csv
+Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/param_values.csv
+Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/sa_by_yr_s1.npy
+Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/sa_by_mth_delta.npy
+Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/7000550_pseudo_r_scores.csv
+Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/collapse_days.csv
+Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/hymod_params_256samples.npy
+Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/sa_vary_s1.npy
+Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/7000550_heatmap.npy
+Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/7200799_heatmap.npy
+Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/sa_by_yr_delta.npy
+Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/7200799_pseudo_r_scores.csv
+Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/LeafCatch.csv
+Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/hymod_simulations_256samples.csv
+Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/Robustness.txt
+Leaf River Data structure:
+
+
+
       Precip  Pot_ET  Strmflw
0         0.0    4.60     0.29
1         0.0    4.31     0.24
2         0.0    4.33     0.21
3         0.0    4.78     0.19
4         0.0    2.91     0.18
+

To visualize catchment hydrology, streamflow and precipitation data are usually plotted together as a combined hydrograph (streamflow) and hyetograph (rainfall, from the Greek hyetos, “rain”). Streamflow is plotted as a time series, while rainfall is shown as an inverted bar plot along the top of the graph. Streamflow labels are shown on the left y-axis, while rainfall labels are shown on the right y-axis.

+
# make an axis for the hydrograph
+fig, strmflw_ax = plt.subplots(figsize=[12,6])
+strmflw_ax.set_ylim([0, 50])
+
+#make a second y-axis for the hyetograph
+precip_ax = strmflw_ax.twinx()
+precip_ax.set_ylim([0, 200])
+precip_ax.invert_yaxis()
+
+precip = leaf_data['Precip']
+strmflw_ax.plot(range(0, len(leaf_data['Precip'])), leaf_data['Strmflw'], color='lightcoral')
+strmflw_ax.set_ylabel('Streamflow (mm/day)')
+
+precip_ax.bar(range(0, len(leaf_data['Precip'])), leaf_data['Precip'], width=2)
+precip_ax.set_ylabel('Rainfall (mm/day)')
+precip_ax.legend(['Precip'], loc='center right')
+strmflw_ax.legend(['Streamflow'],bbox_to_anchor=(1, 0.48))
+
+
+
<matplotlib.legend.Legend at 0x7f53b95c6850>
+
+
+_images/hymod1.png +

1.3 Running a Baseline Model Simulation

+

We’ll start our experiment by running HYMOD using its default +parameters.

+
# assign input parameters to generate a baseline simulated streamflow
+Nq = 3  # Number of quickflow routing tanks
+Kq = 0.5 # Quickflow routing tanks' rate parameter
+Ks =  0.001 # Slowflow routing tank's rate parameter
+Alp = 0.5 # Quick/slow split parameter
+Huz = 100 # Maximum height of soil moisture accounting tank
+B = 1.0 # Scaled distribution function shape parameter
+
+# Note that the number of years is 11. One year of model warm-up and ten years are used for actual simulation
+model = msdbook.hymod(Nq, Kq, Ks, Alp, Huz, B, leaf_data, ndays=4015)
+
+
+

1.4 Model Outputs

+

Model outputs include actual evapotranspiration, quick and slow flow, and combined runoff. In this tutorial we focus on the total daily runoff, QQ (\(m^3/s\)). We can use the following script to plot simulated streamflow against observed streamflow.

+
+

Tip

+

View the source code used to create this plot here: plot_observed_vs_simulated_streamflow

+
+
ax = msdbook.plot_observed_vs_simulated_streamflow(df=leaf_data, hymod_dict=model)
+
+
+_images/hymod2.png +

So how does our model perform? We can investigate model performance +across several metrics:

+

1: Mean Absolute Error (MAE); MAE conveys how the model performs on +average across the 10 year simulation period, with smaller values +indicating better performance. The absolute value is taken so that +positive and negative errors do not cancel each other out.

+
+\[MAE = \frac{1}{N}\sum_{t=0}^N\left\lvert Q_{sim,t}-Q_{obs,t}\right\rvert\]
+

2: Root Mean Square Error (RMSE); RMSE is the square root of the mean of squared errors across the 10 year simulation period. RMSE is sensitive to large errors between the historical record and the simulated flows, and thus is useful for highlighting the model’s ability to capture extreme flood events.

+
+\[RMSE = \sqrt{\frac{1}{N}\sum_{t=1}^{N}(Q_{sim,t}-Q_{obs,t})^2}\]
+

3: Log-Root Mean Square Error (Log(RMSE)) LOG(RMSE) focuses on model +performance during low-flow events.

+
+\[LOG(RMSE) = log(RMSE)\]
+
mae = np.mean(abs(leaf_data['Strmflw'] - model['Q']))
+mse = metrics.mean_squared_error(model['Q'], leaf_data['Strmflw'])
+rmse = mse**(1/2)
+log_rmse = np.log(rmse)
+
+print(f"MAE: {mae:.4f}\nRMSE: {rmse:.4f}\nLOG(RMSE): {log_rmse:.4f}")
+
+
+
MAE: 1.0787
+RMSE: 2.0918
+LOG(RMSE): 0.7380
+
+
+

The error metrics show that HYMOD performs reasonably well: the MAE is around 1 \(m^3/s\), the RMSE is small relative to the largest observed streamflows, and the LOG(RMSE) is fairly low.

+
+
+

B.4.3. 2- Global Sensitivity Analysis#

+

2.1 Experimental Design and Setup

+

Now we’ll examine how sensitive streamflow simulations generated by +HYMOD are to the model’s input parameters. We’ll perform global +sensitivity analysis (see Section 3.1 of the main text) using the SALib +Python library.

+
from SALib.sample import saltelli
+from SALib.analyze import sobol
+from SALib.analyze import delta
+
+
+

A first and critical step when conducting sensitivity analysis is +determining the experimental design (see Design of Experiments, Section +3.4 of the main text). Our experimental design involves defining the +uncertainties that we’ll be examining, the output of interest, the +ranges of each uncertainty that will be explored and the strategy for +sampling the uncertainty space.

+

For this experiment we’ll explore the five parameters highlighted in Figure 1, drawing their ranges from existing literature on the model. We’ll use Sobol sampling, a quasi-random sampling approach based on low-discrepancy sequences, to sample the uncertainty space (Section 3.3.4).

+

In this demonstration we’ll utilize Sobol Sensitivity Analysis, a +variance based method (Section 3.4.5).

+

To explore HYMOD’s behavior, we’ll examine the sensitivity of four model outputs to the input parameters: 1) predicted flow, 2) Mean Absolute Error (compared with the calibration data set), 3) Root Mean Square Error and 4) Log Root Mean Square Error.

+

This analysis will employ SALib, the Python sensitivity analysis library also used in the Sobol SA tutorial above.

+

To start our analysis, we’ll create a dictionary that describes our model uncertainties and their ranges; this dictionary is named “problem_hymod” (SALib refers to these dictionaries as “problems”).

+
problem_hymod = {
+    'num_vars': 5,
+    'names': ['Kq', 'Ks', 'Alp', 'Huz', 'B'],
+    'bounds': [[0.1, 1],  # Kq
+               [0, 0.1],  # Ks
+               [0, 1],    # Alp
+               [0.1, 500],  # Huz
+               [0, 1.9]]  # B
+}
+
+
+

After defining our uncertainties and their ranges, we’ll use SALib to sample the uncertainty space and run the model for each of the sample sets. For demonstration purposes, we will load a sample that has already been created (param_values_hymod). For HYMOD, the literature recommends running at least N = 10,000 samples; to keep this demonstration easy to run, however, we use only N = 256 Sobol samples. To generate accurate approximations of second-order sensitivity indices, SALib generates N*(2k+2) sets of samples, where N = 256 and k = 5 (the number of uncertainties). For the math behind why this is needed, see Saltelli, A., 2002. Making best use of model evaluations to compute sensitivity indices. Computer Physics Communications 145, 280-297. https://doi.org/10.1016/S0010-4655(02)00280-1.

+

Running the full set of model simulations takes an extended period of time, so we also load the simulation data from a previous run. The following demonstrates how to conduct this analysis:

+
# generate the Saltelli sample (N = 256 base samples, expanded to N*(2k+2) = 3072 parameter sets)
+param_values_hymod = saltelli.sample(problem_hymod, 256)
+
+# dictionary to store outputs in
+d_outputs = {}
+
+# run simulation for each parameter sample
+for i in range(0, len(param_values_hymod)):
+
+    # run model for each sensitivity analysis parameter sets
+    hymod_output = msdbook.hymod(Nq,
+                                 param_values_hymod[i, 0],
+                                 param_values_hymod[i, 1],
+                                 param_values_hymod[i, 2],
+                                 param_values_hymod[i, 3],
+                                 param_values_hymod[i, 4],
+                                 leaf_data,
+                                 ndays=4015)
+
+    # store the simulated total flow discharge
+    d_outputs[f"Q{i}"] = hymod_output["Q"]
+
+
+Q_df_bw = pd.DataFrame(d_outputs)
+
+
+
# load previously generated parameter values
+param_values_hymod = msdbook.load_hymod_params()
+
+# number of samples
+n_samples = len(param_values_hymod)
+
+# load previously generated hymod simulated outputs
+Q_df_bw = msdbook.load_hymod_simulation()
+
+# column names of each sample simulation number
+sample_column_names = [i for i in Q_df_bw.columns if i[0] == 'Q']
+
+
+

A hydrological model such as HYMOD usually includes ordinary differential equations that are sensitive to their initial conditions. Such models also have components in their underlying formulation with long memory, meaning that prior time steps can affect the current simulation. For example, soil moisture and groundwater stores can hold water for a long time and are therefore often considered to exhibit long memory. This affects the partitioning of water into runoff and infiltration, and also controls the generation of baseflow. It is therefore important to start these stores from reasonable initial values. To achieve this, hydrologists usually extend their simulation period and, after the simulation, remove the extended time period that has unreasonable groundwater or surface water values. This period is called the warm-up period.

+

Here we extended our simulation by one year (from 10 years to 11 years) and removed the first year of simulation; our warm-up period is therefore one year.

+
# exclude the first year of simulation from the simulations and reset the index
+Q_df = Q_df_bw.iloc[365:4015].copy().reset_index(drop=True)
+
+# exclude the first year of the input data and reset the index
+leaf_data = leaf_data.iloc[365:4015].copy().reset_index(drop=True)
+
+
+

Now that HYMOD has been warmed up, we’ll examine how HYMOD’s streamflow +outputs vary under different sample sets, and compare them with the +observed streamflow.

+
# add date columns to our simulation data frame; for this data our start date is 1/1/2000
+date_ts = pd.date_range(start='1/1/2000', periods=3650, freq='D')
+Q_df['date'] = date_ts
+Q_df['year'] = date_ts.year
+Q_df['month'] = date_ts.month
+Q_df['day'] = date_ts.day
+
+# aggregate the simulated streamflow to monthly means
+df_sim_mth_mean = Q_df.groupby(['year', 'month'])[sample_column_names].mean()
+
+# do the same for the observed data
+date_ts = pd.date_range(start='1/1/2000', periods=len(leaf_data), freq='D')
+leaf_data['date'] = date_ts
+leaf_data['year'] = date_ts.year
+leaf_data['month'] = date_ts.month
+leaf_data['day'] = date_ts.day
+
+# aggregate the daily observed streamflow to monthly mean
+df_obs_mth_mean = leaf_data.groupby(['year', 'month']).mean()
+
+
+
+

Tip

+

View the source code used to create this plot here: plot_observed_vs_sensitivity_streamflow

+
+
ax = msdbook.plot_observed_vs_sensitivity_streamflow(df_obs=df_obs_mth_mean,
+                                                     df_sim=df_sim_mth_mean)
+
+
+_images/hymod3.png +

2.2 Sensitivity of streamflows to model parameters

+

Now we’ll examine how each of HYMOD’s parameters impacts the variance of simulated streamflows. Using SALib we’ll calculate the first order and total order sensitivity indices of each model parameter. The first order sensitivity index measures the individual impact that a given parameter has on the variance of the simulated streamflows. The total order index measures a parameter’s individual impact together with the impact of all interactions between that parameter and the other parameters.
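Formally, following the standard variance-based definitions, the first order and total order indices of a parameter \(X_i\) with respect to an output \(Y\) can be written as:

+
+\[S_{1,i} = \frac{Var\left[E(Y|X_i)\right]}{Var(Y)}, \qquad S_{T,i} = 1 - \frac{Var\left[E(Y|X_{\sim i})\right]}{Var(Y)}\]
+

where \(X_{\sim i}\) denotes all parameters other than \(X_i\).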

+

We’ll start with an array, Y, which contains the time-averaged simulated streamflow for every uncertainty sample. We’ll then use the sobol.analyze function from SALib to calculate the sensitivity indices (Si). The arguments for this function are the problem dictionary defined in section 2.1 of this tutorial and the array of simulated streamflows, Y.

+
# overall aggregated indices
+Y = Q_df[sample_column_names].mean().to_numpy()
+
+# Perform analysis
+Si = sobol.analyze(problem_hymod, Y)
+
+
+

Now we can examine our results. We’ll print the first order and total order indices for each parameter, then visualize the results with bar plots.

+
print('First order indices = ', Si['S1'])
+
+print('Total order indices = ', Si['ST'])
+
+sns.set_style('white')
+fig = plt.figure(figsize=(8,4))
+ax1 = fig.add_subplot(121)
+ax1.bar(np.arange(5), Si['S1'])
+ax1.set_xticks(np.arange(5))
+ax1.set_xticklabels(['Kq', 'Ks', 'Alp', 'Huz', 'B'])
+ax1.set_ylabel('First order Si')
+ax1.set_ylim([0,1])
+
+ax2 = fig.add_subplot(122)
+ax2.bar(np.arange(5), Si['ST'])
+ax2.set_xticks(np.arange(5))
+ax2.set_xticklabels(['Kq', 'Ks', 'Alp', 'Huz', 'B'])
+ax2.set_ylabel('Total order Si')
+ax2.set_ylim([0,1])
+
+
+
First order indices =  [9.55550001e-05 7.49249463e-04 5.62386413e-04 7.03327551e-01
+ 2.53701895e-01]
+Total order indices =  [1.76174200e-06 1.63288175e-03 3.41378460e-04 6.88983864e-01
+ 2.53922146e-01]
+
+
+
+
+
+
(0.0, 1.0)
+
+
+_images/hymod4.png +

Our findings indicate that, in this instance, the streamflow estimate from HYMOD is highly sensitive to the soil moisture parameters Huz and B and hardly affected by the routing parameters. Notably, there is very little interaction between parameters, which is why the total order indices are nearly identical to the first order indices.
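A quick way to confirm this is to compare the two sets of indices directly; the interaction contribution of each parameter is approximately the difference between its total order and first order indices:

+# approximate interaction contribution for each parameter
+# (small negative values are sampling noise)
+interactions = Si['ST'] - Si['S1']
+print(dict(zip(problem_hymod['names'], np.round(interactions, 3))))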

+

2.3 How do different performance metrics affect the results of our +sensitivity analysis?

+

Streamflow has many different properties. In this section, we discuss how the selection of performance metrics can lead to fundamentally different sensitivity analysis results. For example, one could focus only on aggregated streamflow metrics such as the mean (as presented so far), or only on extreme events such as droughts or floods.

+

Here we compare three different metrics: 1) Mean Absolute Error (MAE), 2) Root Mean Square Error (RMSE), and 3) Log-Root Mean Square Error (LOG(RMSE)).

+

Each of these metrics focuses on a specific attribute of streamflow. For +example, RMSE highlights the impacts of extreme flood events, while +LOG(RMSE) focuses on model performance during low-flow events.

+
# calculate error metrics
+mae = Q_df[sample_column_names].apply(lambda x: abs(x-leaf_data["Strmflw"]), axis=0)
+mse = Q_df[sample_column_names].apply(lambda x: metrics.mean_squared_error(x, leaf_data["Strmflw"]), axis=0)
+rmse = mse**(1/2)
+
+# add error metrics to a dictionary
+d_metrics = {'MAE': mae.mean().values,
+             'RMSE': rmse.values,
+             'LOG[RMSE]': np.log10(rmse.values)}
+
+# convert to a dataframe
+df_metrics_SA = pd.DataFrame(d_metrics)
+
+
+

We can use the following to calculate the SA indices for each metric and visualize them.

+
df_metric_s1_result = pd.DataFrame(np.zeros((3, 5)), columns=['Kq', 'Ks', 'Alp', 'Huz', 'B'])
+df_metric_sT_result = pd.DataFrame(np.zeros((3, 5)), columns=['Kq', 'Ks', 'Alp', 'Huz', 'B'])
+
+# conduct sensitivity analysis for each metric
+for index, i in enumerate(d_metrics.keys()):
+
+    # get the data as a numpy array for the target metric
+    Y = d_metrics[i]
+
+    # use the metric to conduct SA
+    Si = sobol.analyze(problem_hymod, Y, print_to_console=False)
+
+    # add the sensitivity indices to the output data frame
+    df_metric_s1_result.iloc[index, :] = Si['S1']
+    df_metric_sT_result.iloc[index, :] = Si['ST']
+
+
+
# create seaborn heatmap with required labels
+fig = plt.figure(figsize=(12,4))
+ax1 = fig.add_subplot(121)
+# labels for y-axis
+y_axis_labels = ['Mean Absolute Error', 'RMSE', 'Log(RMSE)']
+
+# plot heatmap
+ax1 = sns.heatmap(df_metric_s1_result, yticklabels=y_axis_labels, annot=True,  cmap='inferno_r', cbar_kws={'label': 'Si'}, cbar=False)
+ax1.figure.axes[-1].yaxis.label.set_size(14)
+ax1.set_title('First Order Sensitivity')
+
+ax2 = fig.add_subplot(122)
+ax2 = sns.heatmap(df_metric_sT_result, yticklabels=y_axis_labels, annot=True,  cmap='inferno_r', cbar_kws={'label': 'Si'})
+ax2.figure.axes[-1].yaxis.label.set_size(14)
+ax2.set_title('Total Order Sensitivity')
+
+
+
Text(0.5, 1.0, 'Total Order Sensitivity')
+
+
+_images/hymod5.png +

The first order sensitivity indices indicate that HYMOD’s sensitivity to its parameters differs depending on how its output is measured. Unsurprisingly, the mean absolute error is highly sensitive to the soil moisture accounting parameters Huz and B, just like the overall streamflow predictions above. However, when we examine RMSE and LOG(RMSE), the routing parameter Alp becomes influential and the sensitivity to parameter B is reduced. As described above, RMSE and LOG(RMSE) respond to model performance in high-flow and low-flow periods, respectively. Our results indicate that for these flow regimes Alp, the parameter that governs the split between quick and slow flow, is an important factor. While Huz still has the largest effect on all three measures, it is much less influential for RMSE and LOG(RMSE) than it is for MAE.

+

The total order sensitivity indices reveal a different, more complex story. While the MAE sensitivity is largely governed by first order effects (like the streamflow predictions above), the RMSE and LOG(RMSE) error metrics show significant interactions. Alp has the highest total order sensitivity for RMSE and is roughly equal to Huz for LOG(RMSE). Kq, which has a relatively low first order sensitivity index, shows a strong contribution to variance when interactions are taken into account.
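The second order indices returned by sobol.analyze quantify these pairwise interactions directly. A quick way to inspect them, assuming Si still holds the Sobol results for the LOG[RMSE] metric from the loop above:

+# pairwise (second order) interaction indices; SALib fills only the upper
+# triangle of this matrix and leaves the remaining entries as NaN
+names = problem_hymod['names']
+print(pd.DataFrame(Si['S2'], index=names, columns=names).round(3))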

+

Radial convergence plots are a helpful way to visualize the interactions +between parameters. These plots array the model parameters in a circle +and plot the first order, total order and second order Sobol sensitivity +indices for each parameter. The first order sensitivity is shown as the +size of a closed circle, the total order as the size of a larger open +circle and the second order as the thickness of a line connecting two +parameters. Below is an example of a radial convergence plot for the +LOG(RMSE) measure. The plot indicates strong interactions between the +Huz and Alp parameters, as well as Alp and Kq. There is also an +interaction between Alp and Ks.

+
import numpy as np
+import itertools
+import seaborn as sns
+import math
+sns.set_style('whitegrid', {'axes.linewidth': 0, 'axes.edgecolor': 'white'})
+
+def is_significant(value, confidence_interval, threshold="conf"):
+    if threshold == "conf":
+        return value - abs(confidence_interval) > 0
+    else:
+        return value - abs(float(threshold)) > 0
+
+def grouped_radial(SAresults, parameters, radSc=2.0, scaling=1, widthSc=0.5, STthick=1, varNameMult=1.3, colors=None, groups=None, gpNameMult=1.5, threshold="conf"):
+    # Derived from https://github.com/calvinwhealton/SensitivityAnalysisPlots
+    fig, ax = plt.subplots(1, 1)
+    color_map = {}
+
+    # initialize parameters and colors
+    if groups is None:
+
+        if colors is None:
+            colors = ["k"]
+
+        for i, parameter in enumerate(parameters):
+            color_map[parameter] = colors[i % len(colors)]
+    else:
+        if colors is None:
+            colors = sns.color_palette("deep", max(3, len(groups)))
+
+        for i, key in enumerate(groups.keys()):
+            #parameters.extend(groups[key])
+
+            for parameter in groups[key]:
+                color_map[parameter] = colors[i % len(colors)]
+
+    n = len(parameters)
+    angles = radSc*math.pi*np.arange(0, n)/n
+    x = radSc*np.cos(angles)
+    y = radSc*np.sin(angles)
+
+    # plot second-order indices
+    for i, j in itertools.combinations(range(n), 2):
+        #key1 = parameters[i]
+        #key2 = parameters[j]
+
+        if is_significant(SAresults["S2"][i][j], SAresults["S2_conf"][i][j], threshold):
+            angle = math.atan((y[j]-y[i])/(x[j]-x[i]))
+
+            if y[j]-y[i] < 0:
+                angle += math.pi
+
+            line_hw = scaling*(max(0, SAresults["S2"][i][j])**widthSc)/2
+
+            coords = np.empty((4, 2))
+            coords[0, 0] = x[i] - line_hw*math.sin(angle)
+            coords[1, 0] = x[i] + line_hw*math.sin(angle)
+            coords[2, 0] = x[j] + line_hw*math.sin(angle)
+            coords[3, 0] = x[j] - line_hw*math.sin(angle)
+            coords[0, 1] = y[i] + line_hw*math.cos(angle)
+            coords[1, 1] = y[i] - line_hw*math.cos(angle)
+            coords[2, 1] = y[j] - line_hw*math.cos(angle)
+            coords[3, 1] = y[j] + line_hw*math.cos(angle)
+
+            ax.add_artist(plt.Polygon(coords, color="0.75"))
+
+    # plot total order indices
+    for i, key in enumerate(parameters):
+        if is_significant(SAresults["ST"][i], SAresults["ST_conf"][i], threshold):
+            ax.add_artist(plt.Circle((x[i], y[i]), scaling*(SAresults["ST"][i]**widthSc)/2, color='w'))
+            ax.add_artist(plt.Circle((x[i], y[i]), scaling*(SAresults["ST"][i]**widthSc)/2, lw=STthick, color='0.4', fill=False))
+
+    # plot first-order indices
+    for i, key in enumerate(parameters):
+        if is_significant(SAresults["S1"][i], SAresults["S1_conf"][i], threshold):
+            ax.add_artist(plt.Circle((x[i], y[i]), scaling*(SAresults["S1"][i]**widthSc)/2, color='0.4'))
+
+    # add labels
+    for i, key in enumerate(parameters):
+        ax.text(varNameMult*x[i], varNameMult*y[i], key, ha='center', va='center',
+                rotation=angles[i]*360/(2*math.pi) - 90,
+                color=color_map[key])
+
+    if groups is not None:
+        for i, group in enumerate(groups.keys()):
+            print(group)
+            group_angle = np.mean([angles[j] for j in range(n) if parameters[j] in groups[group]])
+
+            ax.text(gpNameMult*radSc*math.cos(group_angle), gpNameMult*radSc*math.sin(group_angle), group, ha='center', va='center',
+                rotation=group_angle*360/(2*math.pi) - 90,
+                color=colors[i % len(colors)])
+
+    ax.set_facecolor('white')
+    ax.set_xticks([])
+    ax.set_yticks([])
+    plt.axis('equal')
+    plt.axis([-2*radSc, 2*radSc, -2*radSc, 2*radSc])
+    #plt.show()
+
+
+    return fig
+
+# define groups for parameter uncertainties
+groups={"Soil Moisture" : ["Huz", "B"],
+        "Routing" : ["Alp", "Kq", "Ks"]}
+
+
+fig = grouped_radial(Si, ['Kq', 'Ks', 'Alp', 'Huz', 'B'], groups=groups, threshold=0.025)
+
+
+
Soil Moisture
+Routing
+
+
+_images/hymod6.png +

2.4 Time-Varying Sensitivity Analysis

+

In section 2.3 we saw how performing sensitivity analysis on different measures of model output can yield different conclusions about the importance of each uncertain input. In this section we’ll examine how performing this analysis over time can yield additional insight into the behavior of HYMOD. We’ll first examine how model sensitivities vary by month, then examine how they change across each year of the simulation.

+

For this demonstration, we’ll focus only on the monthly streamflow +predictions generated by HYMOD.

+
# aggregate simulated streamflow data to monthly time series
+df_sim_by_mth_mean = Q_df.groupby('month')[sample_column_names].mean()
+
+# aggregate observed streamflow data to monthly time series
+df_obs_by_mth_mean = leaf_data.groupby('month').mean()
+
+
+

We can use the following to calculate the SA indices for each month and visualize them. Results are pre-loaded for efficiency.

+
# set up dataframes to store outputs
+df_mth_s1 = pd.DataFrame(np.zeros((12,5)), columns=['Kq', 'Ks', 'Alp', 'Huz', 'B'])
+df_mth_delta = df_mth_s1.copy()
+
+# iterate through each month
+for i in range(0, 12):
+
+    # generate the simulation data
+    Y = df_sim_by_mth_mean.iloc[i, :].to_numpy()
+
+    # run SA
+    Si = delta.analyze(problem_hymod, param_values_hymod, Y, print_to_console=False)
+
+    # add to output dataframes
+    df_mth_s1.iloc[i, :] = np.maximum(Si['S1'], 0)
+    df_mth_delta.iloc[i, :] = np.maximum(Si['delta'], 0)
+
+# convert to arrays
+arr_mth_s1 = df_mth_s1.values
+arr_mth_delta = df_mth_delta.values
+
+
+

The following can be used to visualize the time-varying first order indices. The first order index represents the direct impact of a specific parameter on model outputs.

+
+

Tip

+

View the source code used to create this plot here: plot_monthly_heatmap

+
+
# load previously ran data
+arr_mth_delta, arr_mth_s1 = msdbook.load_hymod_monthly_simulations()
+
+# plot figure
+ax, ax2 = msdbook.plot_monthly_heatmap(arr_sim=arr_mth_s1.T,
+                                       df_obs=df_obs_by_mth_mean,
+                                       title='First Order - Mean Monthly SA')
+
+
+_images/hymod7.png +

This figure demonstrates the first order sensitivity indices when the +streamflow data are aggregated by month. The purple line represents the +observed monthly discharge. The figure indicates that the first order +indices are highest for B and Huz across all months and lowest for Alp, +Ks, and Kq. Note that in the months with the highest flow, Ks becomes an +influential parameter.

+

We can also focus on the total order sensitivity index, which includes the first order effects as well as interactions between parameters.

+
# plot figure
+ax, ax2 = msdbook.plot_monthly_heatmap(arr_sim=arr_mth_delta.T,
+                                       df_obs=df_obs_by_mth_mean,
+                                       title='Total Order - Mean monthly SA')
+
+
+_images/hymod8.png +

Notably, the total order sensitivity results are different from the first order sensitivity results, which indicates that interactions between the parameters (particularly the routing parameters \(Kq\), \(Ks\), and \(Alp\)) contribute to changes in HYMOD output.

+
# group by year and get mean
+df_sim_by_yr_mean = Q_df.groupby(['year'])[sample_column_names].mean()
+
+# group input data and get mean
+df_obs_by_yr_mean = leaf_data.groupby(['year']).mean()
+
+
+

We can also calculate the sensitivity analysis indices for each individual year. This will allow us to understand whether the controls on model behavior change in different years. The following code first aggregates the outputs to annual time steps, and then calculates the SA indices.

+
# set up dataframes to store outputs
+df_yr_s1 = pd.DataFrame(np.zeros((10, 5)), columns=['Kq', 'Ks', 'Alp', 'Huz', 'B'])
+df_yr_delta = df_yr_s1.copy()
+
+# iterate through each year
+for i in range(0, 10):
+
+    # generate the simulation data
+    Y = df_sim_by_yr_mean.iloc[i, :].to_numpy()
+
+    # run SA
+    Si = delta.analyze(problem_hymod, param_values_hymod, Y, print_to_console=False)
+
+    # add to output dataframes
+    df_yr_s1.iloc[i, :] = np.maximum(Si['S1'], 0)
+    df_yr_delta.iloc[i, :] = np.maximum(Si['delta'], 0)
+
+# convert to arrays
+arr_yr_s1 = df_yr_s1.values
+arr_yr_delta = df_yr_delta.values
+
+
+
+

Tip

+

View the source code used to create this plot here: plot_annual_heatmap

+
+
# load previously ran data
+arr_yr_delta, arr_yr_s1 = msdbook.load_hymod_annual_simulations()
+
+# plot figure
+ax, ax2 = msdbook.plot_annual_heatmap(arr_sim=arr_yr_s1.T,
+                                      df_obs=df_obs_by_yr_mean,
+                                      title='First Order - Mean Annual SA')
+
+
+_images/hymod9.png +

The first order sensitivities at the annual scale are similar to the first order monthly sensitivities. Once again, sensitivities vary across years, and Huz and B are the most consequential parameters.

+
# plot figure
+ax, ax2 = msdbook.plot_annual_heatmap(arr_sim=arr_yr_delta.T,
+                                      df_obs=df_obs_by_yr_mean,
+                                      title='Total Order - Mean Annual SA and Observed flow')
+
+
+_images/hymod10.png +

Our results indicate that the sensitivity indices vary across years and that, now that interactions are included, the Kq, Ks, and Alp parameters also influence the variance of the streamflow output.

+

Although time-varying sensitivity analysis (TVSA) at mean monthly and mean annual temporal resolutions is informative, TVSA is susceptible to the aggregation issue that we discussed earlier in Section 3.2. To avoid this, we can further discretize our time domain and zoom into individual months. This provides even more information about model behavior and the sensitivity of different parameters in different states of the system. The block of code below demonstrates how to implement the monthly TVSA.

+
# set up dataframes to store outputs
+df_vary_s1 = pd.DataFrame(np.zeros((df_obs_mth_mean.shape[0], 5)),
+                          columns=['Kq', 'Ks', 'Alp', 'Huz', 'B'])
+
+df_vary_delta = df_vary_s1.copy()
+
+# iterate through each month
+for i in range(0, df_obs_mth_mean.shape[0]):
+
+    # generate the simulation data
+    Y = df_sim_mth_mean.iloc[i, :].to_numpy()
+
+    # run SA
+    Si = delta.analyze(problem_hymod, param_values_hymod, Y, print_to_console=False)
+
+    # add to output dataframes
+    df_vary_s1.iloc[i, :] = np.maximum(Si['S1'], 0)
+    df_vary_delta.iloc[i, :] = np.maximum(Si['delta'], 0)
+
+# convert to arrays
+arr_vary_s1 = df_vary_s1.values
+arr_vary_delta = df_vary_delta.values
+
+
+
+

Tip

+

View the source code used to create this plot here: plot_varying_heatmap

+
+
# load in previously ran data
+arr_vary_delta, arr_vary_s1 = msdbook.load_hymod_varying_simulations()
+
+# plot figure
+ax, ax2 = msdbook.plot_varying_heatmap(arr_sim=arr_vary_s1.T,
+                                      df_obs=df_obs_mth_mean,
+                                      title='First Order - Time-Varying SA')
+
+
+_images/hymod11.png +

Compared to the TVSA with temporally aggregated streamflow, this figure suggests that Kq is indeed a relevant parameter influencing streamflow output when individual months are considered.

+
# plot figure
+ax, ax2 = msdbook.plot_varying_heatmap(arr_sim=arr_vary_delta.T,
+                                      df_obs=df_obs_mth_mean,
+                                      title='Total Order - Time-Varying SA')
+
+
+_images/hymod12.png +

As above, the total order sensitivities further indicate an importance of Kq that is not apparent when the output is aggregated.

+
+
+

B.4.4. Tips to Apply this methodology to your own problem#

+

In this tutorial, we demonstrated how to use global sensitivity analysis to explore a complex, non-linear model. We showed how measuring sensitivity across multiple measures of model performance and temporal aggregations yields differing results about model sensitivity and behavior. While these results may seem contradictory, they provide useful insight into the behavior of HYMOD. Would we expect the same parameters to control high-flow and low-flow regimes within the model? Maybe, depending on the system, but also, maybe not. This analysis can provide insight into how the model responds to its input parameters, allowing us to compare the results to our expectations. This may allow us to find problems with our initial assumptions, or call attention to model features that can be improved or expanded upon. Depending on the model and context, it may also yield insight into the workings of the underlying system.

+

To run this tutorial on your own model you will need to:

+
  1. Design your experiment by choosing sampling bounds for your parameters and setting up the problem dictionary as in section 2.1.

  2. Choose the parameters of interest.

  3. Generate samples using the saltelli.sample function. This step is problem-dependent; note that the Sobol method can be computationally intensive depending on the model being analyzed. More complex models will be slower to run and will also require more samples to calculate accurate estimates of Sobol indices. Once you complete this process, pay attention to the confidence bounds on your sensitivity indices to see whether you need to run more samples.

  4. Run the parameter sets through your model and record each of the desired model outputs.

  5. Calculate the Sobol indices for each performance criterion. Here, Y will be a numpy array with your external model output, and you will need to include the parameter samples as an additional argument.

  6. Follow the procedure in section 2.4 to disaggregate performance across time.
+
+
+

B.5. Time-evolving scenario discovery for infrastructure pathways#

+
+

Note

+
+
Run the tutorial interactively: Scenario Discovery Notebook.
+
Please be aware that notebooks can take a couple minutes to launch.
+
To run the notebooks yourself, download the files here and use these requirements.
+
+
+
+

B.5.1. Time-evolving scenario discovery for infrastructure pathways#

+

The purpose of this tutorial is to explore time-evolving vulnerability for systems that dynamically adapt to changing conditions. Using an example from water supply planning, we’ll first examine how the performance of a dynamic infrastructure pathway policy changes over time, then use factor mapping (main text Chapter 4.3) to understand which combinations of uncertainties generate vulnerability for two water utilities. Next, we’ll perform factor prioritization (main text Chapter 4.3) to determine which uncertainties have the most influence on water supply performance. Finally, we’ll provide an open platform to explore vulnerability across multiple measures of performance and different combinations of uncertainties.

+
+

B.5.1.1. Background#

+

The Bedford-Greene metropolitan area (Figure 1) is a stylized water resources test case in which two urban water utilities seek to develop an infrastructure investment and management strategy to confront growing demands and a changing climate. The utilities have agreed to jointly construct a new water treatment plant on Lake Classon, a large regional resource. Both utilities have also identified a set of individual infrastructure options to construct if necessary.

+
+Figure 1 +
+

Fig. B.1 Figure 1#

+
+
+

The utilities are formulating a cooperative and adaptive regional management strategy that uses a risk-of-failure (ROF) metric to trigger both short term drought mitigation actions (water use restrictions and treated transfers between utilities) and long-term infrastructure investment decisions (shown in Figure 2a). Both utilities have specified a set of risk triggers and developed a construction order for available infrastructure options.

+

The utilities have run a Monte Carlo simulation to evaluate how these policies respond to a wide array of future States Of the World (SOWs). Each SOW represents a different combination of thirteen uncertain system inputs, including demand growth rates, changes to streamflows, and financial variables. In this context, a fully specified SOW is composed of one sample of uncertain model inputs (e.g., one projection of demand growth rate coupled with one future streamflow scenario and one projection of future financial conditions). The water utilities used Latin Hypercube sampling (Chapter 3.3 of the main text) to develop an ensemble of 1,000 plausible future SOWs. The Monte Carlo simulation evaluates each candidate water supply infrastructure investment and management policy across all 1,000 SOWs, as shown in Figure 2b. For more details on the Monte Carlo sampling for this type of analysis, see Trindade et al. (2019).
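For reference, a Latin Hypercube sample of this kind can be generated with SALib; the sketch below uses placeholder unit-interval bounds rather than the factor ranges used in the actual study:

+from SALib.sample import latin
+
+# sketch of Latin Hypercube sampling over thirteen deeply uncertain factors;
+# the bounds are placeholders, not the ranges used in the study
+problem_du = {
+    'num_vars': 13,
+    'names': ['D1', 'D2', 'D3', 'BT', 'BM', 'DR', 'RE', 'EV', 'PM', 'CT', 'IA', 'IF', 'IP'],
+    'bounds': [[0, 1]] * 13
+}
+
+du_samples = latin.sample(problem_du, 1000)  # 1,000 SOWs x 13 factors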

+

The ROF-based policies respond to each SOW by generating a unique infrastructure pathway: a sequence of infrastructure investment decisions over time. Infrastructure pathways over a set of 1,000 future SOWs are shown in Figure 2c. Infrastructure options are arrayed along the vertical axis and the sequence of infrastructure investments triggered by the policy is plotted as pathways over time. Since the adaptive rule system generates a unique infrastructure sequence for each scenario, Figure 2c summarizes the ensemble of pathways by clustering SOWs according to infrastructure intensity. Dark green lines represent SOWs where the utilities heavily invest in new infrastructure, light green lines represent SOWs with low infrastructure investment, and medium shaded lines represent moderate investment. The shading behind each pathway represents the frequency with which each infrastructure option was triggered over time across the sampled scenarios.

+
+Figure 2 +
+

Fig. B.2 Figure 2#

+
+
+
+
+

B.5.1.2. Evaluating Robustness over time#

+

The two water utilities are interested in maintaining both supply reliability and financial stability across the broadest set of plausible future SOWs. To measure the performance of the infrastructure pathway policy, they’ve defined five critical performance criteria:

+
  • Reliability > 99%

  • Restriction Frequency < 20%

  • Peak Financial Cost < 80% of annual revenue (a measure of debt service spending)

  • Worst-case drought management cost < 10% of annual revenue (a measure of unexpected drought costs)

  • Unit Cost of Expansion < 5 dollars/kgal

To assess the robustness of the infrastructure pathway policy, the two utilities apply a satisficing metric, which measures the percentage of sampled SOWs where the pathway policy meets the performance criteria:

+

\(R =\frac{1}{N}\sum_{j=1}^{N}\Lambda_{\theta,j}\)

+

Where \(\Lambda_{\theta,j}=\)

+
+\[\begin{split}\begin{cases} +1, \quad \textrm{if}\ F(\theta)_{j}\leq \Phi_j \\ +0, \quad \textrm{otherwise} +\end{cases}\end{split}\]
+

And \(\Phi_j\) is the vector of performance criteria evaluated in SOW \(j\), \(\theta\) is the policy portfolio, and \(N\) is the total number of sampled SOWs.
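As a minimal sketch, the satisficing metric above can be computed as follows, assuming a hypothetical performance array in which each column has already been oriented so that meeting a criterion corresponds to values at or below its threshold (for example, reliability would be passed in as 1 - reliability):

+import numpy as np
+
+def satisficing_robustness(performance, thresholds):
+    # performance: hypothetical (N SOWs x criteria) array; thresholds: one value per criterion
+    meets_all = np.all(performance <= np.asarray(thresholds), axis=1)  # Lambda for each SOW
+    return meets_all.mean()                                            # R
+
+# illustrative usage with random placeholder data only
+R = satisficing_robustness(np.random.rand(2000, 5), [0.01, 0.2, 0.8, 0.1, 5])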

+

Below, we’ll visualize how robustness for the two utilities evolves over the 45-year planning horizon. We’ll assess robustness across three time periods: near-term (first 10 years), mid-term (22 years), and long-term (45 years).

+

We start by loading robustness values for both utilities. These values are calculated by applying the robustness metric above across 2,000 simulated SOWs. To make this exercise computationally tractable, we’ve precomputed these values, which can be found in the files “short_term_robustness.csv”, “mid_term_robustness.csv” and “long_term_robustness.csv”. These values are calculated using the function “check_rdm_meet_criteria” within the helper functions.

+
import numpy as np
+from matplotlib import pyplot as plt
+from functions.eBook_SD_helpers import check_rdm_meet_criteria, create_sd_input, plot_selected_tree_maps, get_factor_importances, open_exploration
+import seaborn as sns
+
+# load Deeply uncertain factors
+rdm_factors = np.loadtxt('data/DU_Factors.csv', delimiter= ',')
+
+sns.set()
+short_term_robustness = np.loadtxt('data/short_term_robustness.csv', delimiter= ',')
+mid_term_robustness = np.loadtxt('data/mid_term_robustness.csv', delimiter = ',')
+long_term_robustness = np.loadtxt('data/long_term_robustness.csv', delimiter = ',')
+
+# plot robustness over time
+fig =plt.figure(figsize=(9,3))
+plt.plot([10,22,45], [short_term_robustness[5]*100, mid_term_robustness[5]*100,long_term_robustness[5]*100], c='#B19CD9')
+plt.plot([10, 22, 45], [short_term_robustness[11]*100, mid_term_robustness[11]*100, long_term_robustness[11]*100], c= '#43b284')
+plt.scatter([10,22,45], [short_term_robustness[5]*100, mid_term_robustness[5]*100,long_term_robustness[5]*100], s=100, c='#B19CD9')
+plt.scatter([10, 22, 45], [short_term_robustness[11]*100, mid_term_robustness[11]*100, long_term_robustness[11]*100], s=100, c='#43b284')
+plt.xlabel('Time Horizon (yrs)')
+plt.ylabel('Robustness (% SOWs)')
+plt.legend(['Bedford', 'Greene'])
+plt.title('Robustness Over Time')
+plt.ylim([0, 107])
+
+
+
(0.0, 107.0)
+
+
+_images/discovery_4_1.png +
+
+

B.5.1.3. Exploring performance evolution#

+

The figure above reveals that the robustness of both water utilities degrades over time, with Bedford’s robustness declining further than Greene’s. This suggests that the proposed pathway policy is likely insufficient to meet the long-term needs of the two utilities. But how is the current policy insufficient? To answer that question, we examine the performance measures that fail to meet the performance criteria for each utility across the three planning horizons.

+
# Plot the type of vulnerability over time
+
+### Bedford ###
+plot_robustness_1 = np.zeros([3,5])
+# Determine the percentage of failure SOWs that violate each criterion (note some SOWs fail multiple criteria, so this may sum to >1)
+criteria = ['Reliability', 'Restriction Frequency', 'Peak Financial Cost', 'Worst-case drought\nManagement Cost', 'Stranded Assets']
+plot_robustness_1[0,:] = (1 - short_term_robustness[0:5])/(1-short_term_robustness[5])
+plot_robustness_1[1,:] = (1 - mid_term_robustness[0:5])/(1-mid_term_robustness[5])
+plot_robustness_1[2,:] = (1 - long_term_robustness[0:5])/(1-long_term_robustness[5])
+
+# Plot over time
+fig, axes = plt.subplots(nrows=1, ncols=3, figsize=(9,4))
+axes[0].bar(np.arange(5), plot_robustness_1[0,:], color='#B19CD9')
+axes[0].set_xticks(np.arange(5))
+axes[0].set_xticklabels(criteria, rotation='vertical')
+axes[0].set_ylim([0,1])
+axes[0].set_title('10-year Horizon')
+axes[0].set_ylabel('Fraction of failure SOWs')
+axes[1].bar(np.arange(5), plot_robustness_1[1,:], color='#B19CD9')
+axes[1].set_xticks(np.arange(5))
+axes[1].set_xticklabels(criteria, rotation='vertical')
+axes[1].set_ylim([0,1])
+axes[1].set_title('22-year Horizon')
+axes[2].bar(np.arange(5), plot_robustness_1[2,:], color='#B19CD9')
+axes[2].set_xticks(np.arange(5))
+axes[2].set_xticklabels(criteria, rotation='vertical')
+axes[2].set_title('45-year Horizon')
+axes[2].set_ylim([0,1])
+fig.suptitle('Bedford')
+plt.tight_layout()
+
+### Greene ###
+# Determine the percentage of failure SOWs that violate each criterion (note some SOWs fail multiple criteria, so this may sum to >1)
+plot_robustness_2 = np.zeros([3, 5])
+plot_robustness_2[0, :] = (1 - short_term_robustness[6:11]) / (1 - short_term_robustness[11])
+plot_robustness_2[1, :] = (1 - mid_term_robustness[6:11]) / (1 - mid_term_robustness[11])
+plot_robustness_2[2, :] = (1 - long_term_robustness[6:11]) / (1 - long_term_robustness[11])
+
+fig, axes = plt.subplots(nrows=1, ncols=3, figsize=(9, 4))
+axes[0].bar(np.arange(5), plot_robustness_2[0, :], color='#43b284')
+axes[0].set_xticks(np.arange(5))
+axes[0].set_xticklabels(criteria, rotation='vertical')
+axes[0].set_title('10-year Horizon')
+axes[0].set_ylim([0,1])
+axes[0].set_ylabel('Fraction of failure SOWs')
+axes[1].bar(np.arange(5), plot_robustness_2[1, :], color='#43b284')
+axes[1].set_xticks(np.arange(5))
+axes[1].set_xticklabels(criteria, rotation='vertical')
+axes[1].set_title('22-year Horizon')
+axes[1].set_ylim([0,1])
+axes[2].bar(np.arange(5), plot_robustness_2[2, :], color='#43b284')
+axes[2].set_xticks(np.arange(5))
+axes[2].set_xticklabels(criteria, rotation='vertical')
+axes[2].set_title('45-year Horizon')
+axes[2].set_ylim([0,1])
+fig.suptitle('Greene')
+plt.tight_layout()
+
+
+_images/discovery_6_0.png +_images/discovery_6_1.png +

In the figures above, we observe that the vulnerability of the two utilities changes in different ways. Early in the simulation period, Bedford is vulnerable to failures in reliability (though the robustness figure created in section B.5.1.2 reveals that these failures are very rare). As the simulation period progresses, Bedford’s vulnerability expands to include failures in restriction frequency and worst-case cost. These failures indicate that the utility has an overall inability to manage drought conditions as future conditions progress.

+

Greene shows a very different evolution in vulnerability. Early in the simulation period, failures manifest in the restriction frequency objective, suggesting that the utility must rely on water use restrictions to maintain supply reliability. As the simulation progresses, however, the vulnerability evolves. When evaluated across the 45-year planning horizon, a new failure mode emerges: financial failure, manifesting in peak financial cost and stranded assets. This suggests that the proposed pathway policy may be over-investing in new infrastructure, straining the utility’s budget with large debt payments that are unnecessary to maintain supply reliability.

+
+
+

B.5.1.4. How do deep uncertainties generate vulnerability?#

+

While the evolution of robustness provides insight into how the system +evolves over time, it does not reveal why each utility is vulnerable. +To examine how deep uncertainties generate vulnerability over time for +the two utilities, we perform scenario discovery (factor mapping, +Chapter 4.3). Here we’ll utilize gradient boosted trees to identify +regions of the uncertainty space that cause the utilities to fail to +meet performance criteria.
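The classification and plotting steps below are wrapped in the tutorial’s helper functions. Conceptually, the boosted-tree step underlying them looks something like the following sketch; the estimator settings and the failures vector are illustrative assumptions, not the helpers’ actual implementation:

+from sklearn.ensemble import GradientBoostingClassifier
+
+# `failures` is a hypothetical boolean vector marking SOWs where a utility
+# misses its performance criteria; a random placeholder is used here purely
+# so the sketch runs end to end
+failures = np.random.rand(rdm_factors.shape[0]) < 0.3
+
+# fit a boosted-tree classifier that predicts failure from the sampled deep
+# uncertainties, then rank factors by their relative importance
+gbc = GradientBoostingClassifier(n_estimators=250, max_depth=4)
+gbc.fit(rdm_factors, failures)
+print(gbc.feature_importances_)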

+
# import the performance data across 2000 SOWs for three time periods
+short_term_performance = np.loadtxt('data/short_term_performance.csv', delimiter= ',')
+mid_term_performance = np.loadtxt('data/mid_term_performance.csv', delimiter = ',')
+long_term_performance = np.loadtxt('data/long_term_performance.csv', delimiter = ',')
+
+satisficing_criteria = [.98, .2, .8, .1, 5]
+
+# transform into scenario discovery input
+short_term_SD_input = create_sd_input(short_term_performance, satisficing_criteria)
+mid_term_SD_input = create_sd_input(mid_term_performance, satisficing_criteria)
+long_term_SD_input = create_sd_input(long_term_performance, satisficing_criteria)
+
+# factor mapping Bedford
+fig, axes = plt.subplots(nrows=1, ncols=3, figsize=(9,3))
+plot_selected_tree_maps(5, 'short_term', 0, 6, satisficing_criteria, 0, axes[0])
+axes[0].set_title('10-year Horizon')
+plot_selected_tree_maps(5, 'mid_term', 0, 6, satisficing_criteria, 0, axes[1])
+axes[1].set_title('22-year Horizon')
+plot_selected_tree_maps(5, 'long_term', 0, 1, satisficing_criteria, 0, axes[2])
+axes[2].set_title('45-year Horizon')
+fig.suptitle('Bedford Factor Maps')
+plt.tight_layout()
+
+# factor mapping Greene
+fig, axes = plt.subplots(nrows=1, ncols=3, figsize=(9,3))
+plot_selected_tree_maps(11, 'short_term', 0, 8, satisficing_criteria, 0, axes[0])
+axes[0].set_title('10-year Horizon')
+plot_selected_tree_maps(11, 'mid_term', 0, 6, satisficing_criteria, 0, axes[1])
+axes[1].set_title('22-year Horizon')
+plot_selected_tree_maps(11, 'long_term', 0, 1, satisficing_criteria, 0, axes[2])
+axes[2].set_title('45-year Horizon')
+fig.suptitle('Greene Factor Maps')
+plt.tight_layout()
+
+
+
Factor map for Bedford
+Factor map for Bedford
+Factor map for Bedford
+Factor map for Greene
+Factor map for Greene
+Factor map for Greene
+
+
+_images/discovery_9_1.png +_images/discovery_9_2.png +

In the figures above, we learn more about how the vulnerability of the two utilities evolves over time. Bedford begins with very few possible failures, which appear only under futures with high demand growth. When evaluated across a 22-year planning horizon, Bedford is vulnerable when near-term demand growth is high and water use restrictions are less effective than predicted. Under the full 45-year planning horizon, Bedford is vulnerable to sustained high levels of demand growth, failing if either near-term or mid-term demand growth exceeds expected levels.

+

Greene’s vulnerability evolves differently. It begins with vulnerability to high demand growth, but as the simulation progresses (and infrastructure is constructed), the utility becomes vulnerable to low demand growth futures, which cause the failures in financial criteria shown in section B.5.1.3. This indicates that the pathway policy over-builds in many SOWs and becomes financially unstable if demand does not grow sufficiently to provide revenue to cover debt service payments.

+
+
+

B.5.1.5. Which uncertainties have the most influence on time-evolving performance?#

+

The factor maps generated in section B.5.1.4 present the vulnerability generated by the two most important deep uncertainties, as determined by gradient boosted trees. Yet factor prioritization shows that more than two uncertainties are influential to regional performance. Further, we can observe that individual uncertainties have different impacts on each performance objective, and these impacts may change over time. In the cells below, explore the impact of deep uncertainty by generating factor maps for different combinations of deep uncertain factors, objectives, and time horizons.

+
sns.set_style('white')
+# deep uncertainty abbreviations: 'D1', 'D2', 'D3', 'BT', 'BM', 'DR', 'RE', 'EV', 'PM', 'CT', 'IA', 'IF', 'IP'
+uncertainties = ['Near-term demand', 'Mid-term demand', 'Long-term demand', 'Bond Term', 'Bond Rate', 'Discount Rate', 'Restriction Effectiveness', 'Evaporation Rate', 'Permitting time', 'Construction time', 'Inflow Amplitude', 'Inflow Frequency', 'Inflow Period']
+
+u1_st_FI = get_factor_importances(short_term_SD_input, rdm_factors, 250, 4, 5)
+u1_mt_FI = get_factor_importances(mid_term_SD_input, rdm_factors, 250, 4, 5)
+u1_lt_FI = get_factor_importances(long_term_SD_input, rdm_factors, 250, 4, 5)
+
+u1_all = np.vstack([u1_st_FI,u1_mt_FI, u1_lt_FI])
+u1_all = np.transpose(u1_all)
+
+# factor ranking -- utility 2
+u2_st_FI = get_factor_importances(short_term_SD_input, rdm_factors, 250, 4, 11)
+u2_mt_FI = get_factor_importances(mid_term_SD_input, rdm_factors, 250, 4, 11)
+u2_lt_FI = get_factor_importances(long_term_SD_input, rdm_factors, 250, 4, 11)
+u2_all = np.vstack([u2_st_FI,u2_mt_FI, u2_lt_FI])
+u2_all = np.transpose(u2_all)
+
+fig, (ax, ax2, cax) = plt.subplots(ncols=3,figsize=(5,5),
+                  gridspec_kw={"width_ratios":[1,1, 0.1]})
+fig.subplots_adjust(wspace=0.3)
+im = ax.imshow(u1_all, cmap='Reds', vmin=0, vmax=.3)
+ax.set_yticks(np.arange(13))
+ax.set_yticklabels(uncertainties)
+ax.set_xticks(np.arange(3))
+ax.set_xlabel('Time Horizon')
+ax.set_title('Bedford')
+
+im1 = ax2.imshow(u2_all, cmap='Reds', vmin=0, vmax=.3)
+ax2.set_yticks(np.arange(13))
+ax2.set_yticklabels([])
+ax2.set_xticks(np.arange(3))
+ax2.set_xlabel('Time Horizon')
+ax2.set_title('Greene')
+fig.colorbar(im, cax=cax, label='Factor Importance')
+plt.tight_layout()
+
+
+_images/discovery_12_0.png +

The Figure above shows the factor importance as determined by gradient boosted trees for both utilities across the three planning horizons. While near-term demand growth is important for both utilities under all three planning horizons, the importance of other factors evolves over time. For example, restriction effectiveness plays an important role for Greene under the 22-year planning horizon but disappears under the 45-year planning horizon. In contrast, the bond interest rate is important for predicting success over the 45-year planning horizon, but does not appear important over the 10- or 22-year planning horizons. These findings highlight how assumptions about the planning period can have a large impact on modeling outcomes.

+
+
+

B.5.1.6. Open exploration#

+

In the cell below, use the function to explore how factor maps change +for the two utilities based upon the uncertainties plotted, the +objectives of interest and the time horizon.

+
# specify the utility ("Bedford" or "Greene")
+utility = "Bedford"
+
+# specify which performance objectives to investigate (note that not all performance objectives have failures, which may result in a blank factor map)
+# set this to one of the following: "Reliability", "Restriction Frequency", "Peak Financial Cost", "Worst Case Cost" or "Unit Cost"
+objective = "Reliability"
+
+# select uncertainties from the following list: 'D1', 'D2', 'D3', 'BT', 'BM', 'DR', 'RE', 'EV', 'PM', 'CT', 'IA', 'IF', 'IP'
+uncertainty_1 = 'D1'
+uncertainty_2 = 'D2'
+
+# The code below will plot factor maps over the three planning horizons for the information above
+fig, axes = plt.subplots(nrows=1, ncols=3, figsize=(9,3))
+open_exploration(utility, objective, 'short_term', uncertainty_1, uncertainty_2, axes[0])
+open_exploration(utility, objective, 'mid_term', uncertainty_1, uncertainty_2, axes[1])
+open_exploration(utility, objective, 'long_term', uncertainty_1, uncertainty_2, axes[2])
+plt.tight_layout()
+
+
+
Factor map for Bedford, reliability
+Factor map for Bedford, reliability
+Factor map for Bedford, reliability
+
+
+_images/discovery_16_1.png +
+
+

B.5.1.7. Tips to apply this methodology to your own problem#

+

In this tutorial, we demonstrated time-evolving scenario discovery for a +cooperative water supply system. To apply this workflow to your own +problem:

+
  1. Choose sampling bounds for your parameters of interest, which will represent the uncertainties that characterize your system.

  2. Generate samples for these parameters (this can be done using the saltelli.sample function as in B.2 or with another package).

  3. Define performance criteria for your problem.

  4. Evaluate parameter sets through your model, and save performance measures across multiple time horizons.

  5. Draw from the supporting code for this tutorial to perform scenario discovery and visualize results.
+
+

B.5.1.8. References#

+

Trindade, B. C., Reed, P. M., & Characklis, G. W. (2019). Deeply uncertain pathways: Integrated multi-city regional water supply infrastructure investment and portfolio management. Advances in Water Resources, 134, 103442.

+
+
+
+
+

B.6. A Hidden-Markov Modeling Approach to Creating Synthetic Streamflow Scenarios Tutorial#

+
+

Note

+
+
Run the tutorial interactively: HMM Notebook.
+
Please be aware that notebooks can take a couple minutes to launch.
+
To run the notebooks yourself, download the files here and use these requirements.
+
+
+
+

B.6.1. A Hidden-Markov Modeling Approach to Creating Synthetic Streamflow Scenarios#

+

In this notebook, we will be covering the basics of fitting a Hidden +Markov Model-based synthetic streamflow generator for a single site in +the Upper Colorado River Basin. First, we will characterize the observed +historical flow in the basin from 1909-2013. Then, we will fit a +synthetic streamflow generator to the observed flows in the basin in +order to create stationary synthetic flows. Finally, we will create a +non-stationary version of the generator to create flows that could be +representative of plausible future climate in the region. We ultimately +show how to place the synthetically generated flows in the context of +physically-informed CMIP5 projections to compare the two methods.

+
+

B.6.1.1. Background#

+

In the Western United States (US), and particularly the Colorado River +Basin, a recent study used tree-ring reconstructions to suggest that the +megadrought that has been occurring in the Southwest over the past 22 +years is the region’s worst drought since about 800 AD (Williams et al., +2022). The study’s lead author, UCLA climatologist Park Williams, +suggested that had the sequence of wet-dry years occurred without +anthropogenic forcing, the 2000s would have likely still been dry, but +not on the same level as the worst of the last millennium’s +megadroughts.

+

The recent trend of warming and reduced soil moisture in the Southwest +US is highly challenging from a water systems planning and management +perspective for the Colorado River Basin. Given the wide recognition +that the river is over-allocated, the most recent drought highlights the +difficulty of sustaining the flow requirements as dictated by the +Colorado Compact. Thus, there has been an increasing focus in +exploratory modeling efforts to clarify how vulnerable water systems in +this region are to plausible drought streamflow scenarios for the +future. In this tutorial, we’ll discuss how to create these scenarios +using a Hidden Markov Model (HMM)- based streamflow synthetic generator. +As discussed in Section +2.1 +and +4.2 +of the eBook, future climate conditions in the basin represent a deep +uncertainty that can lead to highly consequential water scarcity +outcomes. It is advantageous to create a model such as the HMM-based +generator in order to facilitate the creation of many ensembles of +streamflow that can ultimately be used to force regional water systems +models to understand how variability and drought extremes affect +regional water shortages, operations, and policies.

+
+
+
Lake Powell shows persistent effects from drought (Source: U.S. Bureau of Reclamation)
+
+
+
+

B.6.2. Let’s Get Started!#

+
+

B.6.2.1. Observed Record#

+

First, let’s take a look at the observed data from 1909-2013 for a +specific site. In this example, we use the outlet gauge of the Upper +Colorado River (USGS Gauge 09163500 at the Colorado-Utah state line). +Below, we create a plot of the annual streamflow.

+
# Import libraries
+import matplotlib as mpl
+import matplotlib.pyplot as plt
+import matplotlib.patches as patches
+import numpy as np
+import pandas as pd
+from random import random
+from SALib.sample import latin
+from scipy import stats as ss
+import statistics
+import statsmodels.api as sm
+
+
+# Import helper functions from local package
+from functions import fitmodel
+from functions import plotstates
+from functions import plotdist
+
+
+
# Read in annual historical data
+AnnualQ = pd.read_csv('data/uc_historical.csv')
+AnnualQ['Year'] = list(range(1909, 2014))
+
+# Plot a line graph
+fig, ax = plt.subplots(figsize=(12, 8))
+ax.plot(AnnualQ.iloc[:, 1],
+        AnnualQ.iloc[:, 0],
+        color='#005F73',
+        label='Annual')
+
+# Add labels and title
+ax.set_title("Upper Colorado Annual Flow")
+ax.set_xlabel("Year", fontsize=16)
+ax.set_ylabel("Annual Flow (cubic feet per year)", fontsize=16)
+plt.xticks(fontsize=12)
+plt.yticks(fontsize=12)
+mpl.rc('legend', fontsize=16)
+legend = plt.legend(loc="upper right")
+plt.show()
+plt.close()
+
+
+_images/hmm_9_0.png +

Let’s calculate an 11-year rolling mean of the same data to get a sense +of long-term trends.

+
fig, ax = plt.subplots(figsize=(12, 8))
+
+# Plot the original line graph
+plt.plot(AnnualQ.iloc[:,1],
+         AnnualQ.iloc[:,0],
+         color='#005F73',
+         label='Annual')
+
+# Plot an 11-year rolling mean
+plt.plot(AnnualQ.iloc[:, 1].rolling(11).mean(),
+         AnnualQ.iloc[:, 0].rolling(11).mean(),
+         color='#183A2E',
+         label='11-Year Rolling Mean')
+
+# Add labels and title
+plt.title("Upper Colorado Annual Flow")
+ax.set_xlabel("Year",fontsize=16)
+ax.set_ylabel("Annual Flow (cubic feet per year)", fontsize=16)
+plt.xticks(fontsize=12)
+plt.yticks(fontsize=12)
+mpl.rc('legend', fontsize=16)
+legend = plt.legend()
+plt.show()
+plt.close()
+
+
+_images/hmm_11_0.png +

The Colorado Compact, which prescribes flows between the Upper and Lower +Colorado Basins, was negotiated using data prior to 1922, a time period +revealed by the above figure to be one of the consistently wetter +periods on record. It’s clear today that since the 1980s, the Southwest +US has been experiencing aridification (Overpeck et al., 2020) and that +this observed record alone isn’t an accurate representation of what +future climate might look like in this region.

+

Let’s get a little more specific and formally quantify decadal droughts +that have occurred in the observed period. We use a metric proposed in +Ault et al. (2014). The authors define a decadal drought as when the +11-year rolling mean falls below a threshold that is 1/2 a standard +deviation below the overall mean of the record. We can then highlight +the block of years that fall in a decadal drought using yellow +rectangles below.

+
# Define drought threshold
+std = statistics.stdev(AnnualQ.iloc[:, 0])
+threshold = np.mean(AnnualQ.iloc[:, 0]) - (0.5 * std)
+
+# Find where the rolling mean dips below the threshold
+drought_instances = [i for i, v in enumerate(AnnualQ.iloc[:,0].rolling(11).mean()) if v < threshold]
+drought_years = AnnualQ.iloc[:, 1].rolling(11).mean()[drought_instances]
+
+# Add labels and title
+fig, ax = plt.subplots(figsize=(12, 8))
+ax.plot(AnnualQ.iloc[:,1],
+        AnnualQ.iloc[:,0],
+        color='#005F73',
+        label='Annual')
+
+ax.plot(AnnualQ.iloc[:,1].rolling(11,center=True).mean(),
+        AnnualQ.iloc[:,0].rolling(11,center=True).mean(),
+        color='#183A2E',
+        label='11-Year Rolling Mean')
+
+ax.axhline(y=threshold,
+           color='black',
+           linestyle='--',
+           label='Drought Threshold')
+
+# Visualize the drought periods as yellow rectangles
+for i in drought_years:
+
+    # Plot a box centered around those values and with 5 years on either side.
+    rect = patches.Rectangle((i-5,0), 11,2e7, linewidth=1, edgecolor='#EFE2BE', facecolor='#EFE2BE')
+
+    # Add the patch to the Axes
+    ax.add_patch(rect)
+
+
+plt.title("Upper Colorado Annual Flow")
+ax.set_xlabel("Year", fontsize=16)
+ax.set_ylabel("Annual Flow (cubic feet per year)", fontsize=16)
+plt.xticks(fontsize=12)
+plt.yticks(fontsize=12)
+mpl.rc('legend', fontsize=16)
+legend = plt.legend()
+plt.show()
+plt.close()
+
+
+_images/hmm_14_0.png +

By this metric, the Upper Colorado Basin region has experienced two +decadal droughts over the last century.

+
+
+

B.6.2.2. Synthetic Stationary Generator to Better Quantify Natural Variability#

+

It is important to remember that the streamflow that we have observed in +the region over the last century is only one instance of the hydrology +that could occur since the atmosphere is an inherently stochastic +system. Thus, we require a tool that will allow us to see multiple +plausible realizations of the streamflow record to understand the +internal variability that characterizes the historical period. One +observed realization of historical streamflow is limited in its ability +to capture rare extremes; plausible (but not observed) alternative +instances of streamflow records can help to fill this gap. The tool that +we use to develop synthetic flows for the region is a Gaussian Hidden +Markov Model (HMM). If a system follows a Markov process, it switches +between a number of “hidden states” dictated by a transition matrix. +Each state has its own Gaussian probability distribution (defined by a +mean and standard deviation) and one can draw from this distribution to +create synthetic flows that fit the properties of the historical +distribution. HMMs are an attractive choice for this region because they +can simulate persistence (i.e., long duration droughts), which is a +characteristic of the region’s hydro-climatology. The figure below shows +an example of a 2-state Gaussian HMM that we will be fitting for this +example.

+
+
+
Two-state Gaussian HMM with mean and standard deviation parameters
+
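To make the sampling mechanism concrete, the following is a minimal sketch of how a fitted two-state Gaussian HMM generates a synthetic trace; the means, standard deviations, and transition probabilities here are placeholders, not the values fitted later in this tutorial:

+import numpy as np
+
+# placeholder parameters for illustration only (log space)
+mus = np.array([14.9, 15.3])      # state means: [dry, wet]
+sigmas = np.array([0.25, 0.20])   # state standard deviations
+P = np.array([[0.65, 0.35],       # P[i, j] = probability of moving from state i to state j
+              [0.32, 0.68]])
+
+rng = np.random.default_rng(7)
+n_years = 105
+states = np.zeros(n_years, dtype=int)
+log_flows = np.zeros(n_years)
+
+states[0] = rng.choice(2)                                    # initial hidden state
+log_flows[0] = rng.normal(mus[states[0]], sigmas[states[0]])
+for t in range(1, n_years):
+    states[t] = rng.choice(2, p=P[states[t - 1]])            # Markov transition
+    log_flows[t] = rng.normal(mus[states[t]], sigmas[states[t]])
+
+flows = np.exp(log_flows)         # back-transform out of log space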

Below is the code that fits the HMM model to the last 2/3 of the +historical record of log annual flows at the CO-UT stateline gauge and +creates an alternative trace of 105 years. A subset of the dataset is +chosen in order to minimize overfitting and to retain a set of data for +validation of the model. When we fit our model, we utilize the +Baum-Welch algorithm (a special version of the expectation-maximization +algorithm) to find the optimal parameters that maximize the likelihood +of seeing the observed flows. Ultimately, the algorithm will return a +mean and standard deviation associated with each state (mus and sigmas +defined below) and a 2x2 transition probability matrix that captures the +likelihood of transitioning between states (P). We can also retrieve the +annual hidden states across the observed series, also known as the +Viterbi sequence of states, which classifies each year in a “wet” or +“dry” state.

+
# Number of years for alternative trace
+n_years = 105
+
+# Import historical data that is used to fit the HMM
+AnnualQ_h = pd.read_csv('data/uc_historical.csv')
+
+# Fit the model and pull out relevant parameters and samples
+logQ = np.log(AnnualQ_h)
+hidden_states, mus, sigmas, P, logProb, samples, model = fitmodel.fitHMM(logQ, n_years)
+
+
+
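The fitmodel.fitHMM helper ships with the notebook and is not reproduced here. As a rough, hedged sketch of what such a wrapper could look like (assuming hmmlearn's GaussianHMM, which is consistent with the model.transmat_ attribute printed below, and noting that the actual helper also relabels states so that state 0 is the drier one):

# Hedged sketch of a fitHMM-style wrapper using hmmlearn's GaussianHMM;
# the fitmodel.fitHMM helper shipped with the notebook may differ in detail.
import numpy as np
from hmmlearn.hmm import GaussianHMM

def fit_hmm_sketch(logQ, n_years, n_states=2, seed=123):
    X = np.array(logQ).reshape(-1, 1)                 # hmmlearn expects a 2D array
    model = GaussianHMM(n_components=n_states, n_iter=1000, random_state=seed).fit(X)
    hidden_states = model.predict(X)                  # Viterbi sequence of states
    mus = model.means_.flatten()                      # log-space mean of each state
    sigmas = np.sqrt(np.array(model.covars_).reshape(n_states, -1)[:, 0])  # log-space std of each state
    P = model.transmat_                               # 2x2 transition probability matrix
    logProb = model.score(X)                          # log-likelihood of the fitted model
    samples = model.sample(n_years)                   # (sampled log flows, sampled states)
    # The notebook's helper additionally reorders states so that state 0 is the drier one.
    return hidden_states, mus, sigmas, P, logProb, samples, model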

We’ve fit our HMM, but what does the model look like? Let’s plot the annual time series of hidden states, or the Viterbi sequence. In the code above, we have defined that the drier state is always represented by state 0. Thus, we know that hidden_states = 0 corresponds to the dry state and hidden_states = 1 to the wet state.

+
# Plot Viterbi sequence
+plotstates.plotTimeSeries(np.log(AnnualQ.iloc[:,0]), hidden_states, 'Annual Flow (cubic feet per year)')
+
+
+_images/hmm_21_0.png +

In the figure above, we see that the years with the higher log flows tend to be classified in a “wet” state and the opposite is true of the “dry” state. We can also print the transition matrix, which shows the likelihood of transitioning between states. Note that the system has a high likelihood of persisting in the same state.

+
print(model.transmat_)
+
+
+
[[0.65095026 0.34904974]
+ [0.3205531  0.6794469 ]]
+
+
+
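Persistence can be made more concrete: for a two-state Markov chain, the expected number of consecutive years spent in state i before switching is 1 / (1 - P[i, i]). A quick check with the fitted matrix above:

# Expected residence time (in years) of each state: 1 / (1 - p_ii)
expected_dry_spell = 1 / (1 - model.transmat_[0, 0])   # ~2.9 years for p_00 = 0.651
expected_wet_spell = 1 / (1 - model.transmat_[1, 1])   # ~3.1 years for p_11 = 0.679
print(expected_dry_spell, expected_wet_spell)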

Let’s also plot the distribution of log annual flows associated with the wet and dry states.

+
# Plot wet and dry state distributions
+plotdist.plotDistribution(logQ, mus, sigmas, P)
+
+
+_images/hmm_25_0.png +

The wet state distribution is characterized by a greater mean flow, but note that there is significant overlap in the tails of the two distributions, which demonstrates why years with similar flows can be classified in different states.

+

Now let’s see what the drought dynamics look like in the synthetic scenario that we created using the same definition that we had used for the historical period.

+
# Retrieve samples and back-transform out of log space
+AnnualQ_s = np.exp(samples[0])
+AnnualQ_s = pd.DataFrame(AnnualQ_s)
+AnnualQ_s['Year'] = list(range(1909, 2014))
+
+# Define drought threshold
+std=statistics.stdev(AnnualQ_s.iloc[:, 0])
+threshold=np.mean(AnnualQ_s.iloc[:, 0] - (0.5 * std))
+
+# Where does the rolling mean dip below the threshold
+drought_instances = [i for i,v in enumerate(AnnualQ_s.iloc[:, 0].rolling(11).mean()) if v < threshold]
+drought_years = AnnualQ_s.iloc[:, 1].rolling(11).mean()[drought_instances]
+
+# Visualize the streamflow scenario
+fig, ax = plt.subplots(figsize=(12, 8))
+
+# Plot the original line graph
+ax.plot(AnnualQ_s.iloc[:,1],
+        AnnualQ_s.iloc[:,0],
+        color='#005F73',
+        label='Annual')
+
+# Plot an 11-year rolling mean
+ax.plot(AnnualQ_s.iloc[:,1],
+        AnnualQ_s.iloc[:,0].rolling(11, center=True).mean(),
+        color='#183A2E',
+        label='11-Year Rolling Mean')
+
+# Add a dashed line for the drought threshold
+ax.axhline(y=threshold,
+           color='black',
+           linestyle='--',
+           label='Drought Threshold')
+
+
+for i in drought_years:
+
+    #Plot a box centered around those values and with 5 years on either side.
+    rect = patches.Rectangle((i - 5,
+                              0),
+                              11,
+                              2e7,
+                              linewidth=1,
+                              edgecolor='#EFE2BE',
+                              facecolor='#EFE2BE')
+
+    # Add the patch to the Axes
+    ax.add_patch(rect)
+
+
+plt.title("Upper Colorado Annual Flow (Synthetic Stationary)",fontsize=16)
+plt.xlabel("Year", fontsize=16)
+plt.ylabel("Annual Flow (cubic feet per year)", fontsize=16)
+mpl.rc('legend', fontsize=16)
+plt.legend()
+plt.xticks(fontsize=12)
+plt.yticks(fontsize=12)
+plt.show()
+plt.close()
+
+
+_images/hmm_28_0.png +

You can sample from the model to create more 105-year traces and note how the location and number of decadal droughts change. This demonstrates how different the historical record could look just within the range of natural variability. It’s also important to remember that when a drought occurs can also define its ultimate effect (i.e., does it coincide with a time of large population growth or a time when humans can adapt by conserving or building more infrastructure?). A hydrologic drought need not manifest as an agricultural or operational drought of the same magnitude if stored surface water is available.
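For example, a minimal sketch of drawing a few more 105-year traces from the already-fitted model (assuming the model object behaves like an hmmlearn GaussianHMM with a sample() method) could be:

# Draw additional 105-year synthetic traces from the fitted HMM
n_traces = 10
extra_traces = []
for k in range(n_traces):
    logQ_k, states_k = model.sample(n_years)          # sampled log flows and hidden states
    extra_traces.append(np.exp(logQ_k[:, 0]))         # back-transform out of log space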

+

We externally run the HMM many times to create datasets of 100 instances and 1000 instances of the 105-year traces that are available in the package (“synthetic_stationary_small_sample_100.csv”, “synthetic_stationary_large_sample_1000”). The shaded green lines correspond to the flow duration curves (FDCs) for the generated streamflow traces in comparison with the FDC of the historical record in beige.

+
+
+
Generated streamflow traces in comparison with the FDC of the historical record.
+

As expected, the stationary synthetic FDCs envelop the historical FDC and, in particular, the synthetic traces offer many more instances of low flow conditions that could lead to more extreme drought conditions than what has been observed historically. It is also useful to check for convergence of samples and to determine how many samples are needed to fully represent internal variability. Above we see that the extension to 1000 instances of 105-year traces fills out regions of the FDC, including creating some more extreme drought conditions, but additional samples will likely not fill out the FDC substantially more.
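A flow duration curve is simply the sorted flows plotted against their exceedance probability. The sketch below shows one hedged way to reproduce this comparison; the data path, file layout (one column per trace), and color choices are assumptions rather than the exact script used to make the figure above.

# Minimal FDC sketch: sorted annual flows versus exceedance probability
def flow_duration_curve(flows):
    sorted_flows = np.sort(flows)[::-1]                              # descending
    exceedance = np.arange(1, len(sorted_flows) + 1) / (len(sorted_flows) + 1)
    return exceedance, sorted_flows

# Historical FDC
exc_h, fdc_h = flow_duration_curve(AnnualQ.iloc[:, 0].values)

# FDCs of the externally generated stationary traces (assumed: one column per trace)
synthetic = pd.read_csv('data/synthetic_stationary_small_sample_100.csv')

fig, ax = plt.subplots(figsize=(10, 6))
for col in synthetic.columns:
    exc_s, fdc_s = flow_duration_curve(synthetic[col].values)
    ax.plot(exc_s, fdc_s, color='lightgreen', alpha=0.3)
ax.plot(exc_h, fdc_h, color='tan', linewidth=2, label='Historical')
ax.set_xlabel('Exceedance Probability', fontsize=16)
ax.set_ylabel('Annual Flow (cubic feet per year)', fontsize=16)
ax.legend()
plt.show()
plt.close()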

+
+
+

B.6.2.3. Non-Stationary Synthetic Generator to Impose Climate Changes#

+

Now, we create flows under non-stationary conditions to get a better understanding of what flows can look like under climate changes. In order to create flows under non-stationary conditions, we can adjust the parameters of the HMM to create systematic changes to the model that can represent a changing climate. The HMM has 6 parameters that define it. When we fit the historical model, the fitted parameters represent baseline values. In this non-stationary generator, we define a range to sample these parameters from, shown in the table below.

+
Parameter                                            Current Value   Lower Bound   Upper Bound
Log-Space Wet State Mean Multiplier                  1.00            0.98          1.02
Log-Space Dry State Mean Multiplier                  1.00            0.98          1.02
Log-Space Wet State Standard Deviation Multiplier    1.00            0.75          1.25
Log-Space Dry State Standard Deviation Multiplier    1.00            0.75          1.25
Change in Dry-Dry Transition Probability             0.00            -0.30         +0.30
Change in Wet-Wet Transition Probability             0.00            -0.30         +0.30

Now let’s sample 1000 times from these bounds to create 1000 new parameterizations of the model. Here we use SALib and the Latin Hypercube sample function.

+
# Create problem structure with parameters that we want to sample
+problem = {
+    'num_vars': 6,
+    'names': ['wet_mu', 'dry_mu', 'wet_std','dry_std','dry_tp',"wet_tp"],
+    'bounds': [[0.98, 1.02],
+               [0.98, 1.02],
+               [0.75,1.25],
+               [0.75,1.25],
+               [-0.3,0.3],
+               [-0.3,0.3]]
+}
+
+# generate 1000 parameterizations
+n_samples = 1000
+
+# set random seed for reproducibility
+seed_value = 123
+
+# Generate our samples
+LHsamples = latin.sample(problem, n_samples, seed_value)
+
+
+

Now let’s look at what some of the traces look like in our non-stationary generator. Let’s choose a random instance from the 1000-member space and adjust the parameters accordingly.

+
# Define static parameters
+n_years = 105
+
+# Sample parameter; Adjust to any sample number from 0-999
+sample = 215
+
+# Create empty arrays to store the new Gaussian HMM parameters for each SOW
+Pnew = np.empty([2,2])
+piNew = np.empty([2])
+musNew_HMM = np.empty([2])
+sigmasNew_HMM = np.empty([2])
+logAnnualQ_s = np.empty([n_years])
+
+# Calculate new transition matrix and stationary distribution of SOW at last node as well as new means and standard deviations
+Pnew[0, 0] = max(0.0, min(1.0, P[0, 0] + LHsamples[sample][4]))
+Pnew[1, 1] = max(0.0, min(1.0, P[1, 1] + LHsamples[sample][5]))
+Pnew[0, 1] = 1 - Pnew[0, 0]
+Pnew[1, 0] = 1 - Pnew[1, 1]
+eigenvals, eigenvecs = np.linalg.eig(np.transpose(Pnew))
+one_eigval = np.argmin(np.abs(eigenvals - 1))
+piNew = np.divide(np.dot(np.transpose(Pnew), eigenvecs[:, one_eigval]),
+                  np.sum(np.dot(np.transpose(Pnew), eigenvecs[:,one_eigval])))
+
+musNew_HMM[0] = mus[0] * LHsamples[sample][1]
+musNew_HMM[1] = mus[1] * LHsamples[sample][0]
+sigmasNew_HMM[0] = sigmas[0] * LHsamples[sample][3]
+sigmasNew_HMM[1] = sigmas[1] * LHsamples[sample][2]
+
+# Generate first state and log-space annual flow at last node
+states = np.empty([n_years])
+if random() <= piNew[0]:
+    states[0] = 0
+    logAnnualQ_s[0] = ss.norm.rvs(musNew_HMM[0], sigmasNew_HMM[0])
+else:
+    states[0] = 1
+    logAnnualQ_s[0] = ss.norm.rvs(musNew_HMM[1], sigmasNew_HMM[1])
+
+# Generate remaining state trajectory and log space flows at last node
+for j in range(1, n_years):
+    if random() <= Pnew[int(states[j-1]), int(states[j-1])]:
+        states[j] = states[j-1]
+    else:
+        states[j] = 1 - states[j-1]
+
+    if states[j] == 0:
+        logAnnualQ_s[j] = ss.norm.rvs(musNew_HMM[0], sigmasNew_HMM[0])
+    else:
+        logAnnualQ_s[j] = ss.norm.rvs(musNew_HMM[1], sigmasNew_HMM[1])
+
+# Convert log-space flows to real-space flows
+AnnualQ_s = np.exp(logAnnualQ_s)-1
+
+
+

Now let’s see what this synthetic trace looks like.

+
# Retrieve samples and back-transform out of log space
+AnnualQ_s = pd.DataFrame(AnnualQ_s)
+AnnualQ_s['Year'] = list(range(1909, 2014))
+
+# Define drought threshold
+std = statistics.stdev(AnnualQ_s.iloc[:, 0])
+threshold = np.mean(AnnualQ_s.iloc[:, 0] - (0.5 * std))
+
+# Where does the rolling mean dip below the threshold
+drought_instances = [i for i, v in enumerate(AnnualQ_s.iloc[:, 0].rolling(11).mean()) if v < threshold]
+drought_years = AnnualQ_s.iloc[:, 1].rolling(11).mean()[drought_instances]
+
+# Visualize the streamflow scenario
+fig, ax = plt.subplots(figsize=(12, 8))
+
+# Plot the original line graph
+ax.plot(AnnualQ_s.iloc[:,1],
+        AnnualQ_s.iloc[:,0],
+        color='#005F73',
+        label='Annual')
+
+# Plot an 11-year rolling mean
+ax.plot(AnnualQ_s.iloc[:, 1],
+        AnnualQ_s.iloc[:, 0].rolling(11, center=True).mean(),
+        color='#183A2E',
+        label='11-Year Rolling Mean')
+
+# Add a dashed line for the drought threshold
+ax.axhline(y=threshold,
+           color='black',
+           linestyle='--',
+           label='Drought Threshold')
+
+
+for i in drought_years:
+
+    # Plot a box centered around those values and with 5 years on either side.
+    rect = patches.Rectangle((i - 5,0),
+                             11,
+                             2e7,
+                             linewidth=1,
+                             edgecolor='#EFE2BE',
+                             facecolor='#EFE2BE')
+
+    # Add the patch to the Axes
+    ax.add_patch(rect)
+
+
+plt.title("Annual Flow (Synthetic Non-Stationary)", fontsize=16)
+plt.xlabel("Year", fontsize=16)
+plt.ylabel("Annual Flow (cubic feet per year)", fontsize=16)
+plt.xticks(fontsize=12)
+plt.yticks(fontsize=12)
+mpl.rc('legend', fontsize=16)
+legend = plt.legend(loc="upper right")
+plt.show()
+plt.close()
+
+
+_images/hmm_40_0.png +

Above is the example trace from the new non-stationary model. You may see fewer or more decadal drought instances. We can further summarize overall decadal drought characteristics across the samples. Let’s plot a histogram of the total number of times we go below the drought threshold across these realizations.

+
decadal_drought_occurence=np.empty([1000])
+
+for y in range(1000):
+
+    # Create empty arrays to store the new Gaussian HMM parameters for each SOW
+    Pnew = np.empty([2, 2])
+    piNew = np.empty([2])
+    musNew_HMM = np.empty([2])
+    sigmasNew_HMM = np.empty([2])
+    logAnnualQ_s = np.empty([n_years])
+
+    # Calculate new transition matrix and stationary distribution of SOW at last node
+    # as well as new means and standard deviations
+
+    Pnew[0, 0] = max(0.0,min(1.0, P[0, 0] + LHsamples[y][4]))
+    Pnew[1, 1] = max(0.0,min(1.0, P[1, 1] + LHsamples[y][5]))
+    Pnew[0, 1] = 1 - Pnew[0, 0]
+    Pnew[1, 0] = 1 - Pnew[1, 1]
+    eigenvals, eigenvecs = np.linalg.eig(np.transpose(Pnew))
+    one_eigval = np.argmin(np.abs(eigenvals - 1))
+    piNew = np.divide(np.dot(np.transpose(Pnew), eigenvecs[:, one_eigval]),
+                      np.sum(np.dot(np.transpose(Pnew), eigenvecs[:, one_eigval])))
+
+    musNew_HMM[0] = mus[0] * LHsamples[y][1]
+    musNew_HMM[1] = mus[1] * LHsamples[y][0]
+    sigmasNew_HMM[0] = sigmas[0] * LHsamples[y][3]
+    sigmasNew_HMM[1] = sigmas[1] * LHsamples[y][2]
+
+    # Generate first state and log-space annual flow at last node
+    states = np.empty([n_years])
+    if random() <= piNew[0]:
+        states[0] = 0
+        logAnnualQ_s[0] = ss.norm.rvs(musNew_HMM[0], sigmasNew_HMM[0])
+    else:
+        states[0] = 1
+        logAnnualQ_s[0] = ss.norm.rvs(musNew_HMM[1], sigmasNew_HMM[1])
+
+    # generate remaining state trajectory and log space flows at last node
+    for j in range(1, n_years):
+        if random() <= Pnew[int(states[j-1]), int(states[j-1])]:
+            states[j] = states[j-1]
+        else:
+            states[j] = 1 - states[j-1]
+
+        if states[j] == 0:
+            logAnnualQ_s[j] = ss.norm.rvs(musNew_HMM[0], sigmasNew_HMM[0])
+        else:
+            logAnnualQ_s[j] = ss.norm.rvs(musNew_HMM[1], sigmasNew_HMM[1])
+
+    # Convert log-space flows to real-space flows
+    AnnualQ_s = np.exp(logAnnualQ_s) - 1
+    AnnualQ_s = pd.DataFrame(AnnualQ_s)
+    AnnualQ_s['Year'] = list(range(1909, 2014))
+
+    # Define drought threshold
+    std = statistics.stdev(AnnualQ_s.iloc[:, 0])
+    threshold = np.mean(AnnualQ_s.iloc[:, 0] - (0.5 * std))
+
+    # Where does the rolling mean dip below the threshold
+    drought_instances = [i for i, v in enumerate(AnnualQ_s.iloc[:, 0].rolling(11).mean()) if v < threshold]
+    decadal_drought_occurence[y] = len(drought_instances)
+
+
+
fig, ax = plt.subplots(figsize=(12, 8))
+ax.hist(decadal_drought_occurence,label='Non-Stationary generator',color="#005F73")
+ax.set_xlabel('Number of Instances of Decadal Drought',fontsize=16)
+ax.set_ylabel('Frequency',fontsize=16)
+ax.axvline(x=2, color='r', linestyle='-',label='Observed')
+mpl.rc('legend', fontsize = 16)
+plt.legend()
+plt.xticks(fontsize = 12)
+plt.yticks(fontsize = 12)
+plt.show()
+plt.close()
+
+
+_images/hmm_43_0.png +

Note how many more instances of decadal drought we are creating with the non-stationary generator than in our observed 105-year trace, which creates a rich space in which we can test our models. Just as we did with the stationary generator, we can externally run the non-stationary generator to create 10,000 instances of the 105-year traces that are available in the package (“synthetic_nonstationary_large_sample_10000.csv”). The shaded green and blue lines correspond to the FDCs for the stationary and non-stationary generated streamflow traces in comparison with the FDC of the historical record in beige. Note how the non-stationary generator produces even more drought extremes than the stationary synthetic traces.

+
+
+
Generated streamflow traces in comparison with the FDC of the historical record.
+
+
+

B.6.2.4. Placing CMIP5 Projections in the Context of Non-Stationary Flows#

+

We have broadened the drought conditions that we are creating, which can be very useful for understanding how our water systems model performs under potentially extreme scenarios. However, it’s useful to place our bottom-up synthetically generated flows in the context of global physically-driven CMIP5 projections to get a better understanding of how the two approaches compare. We first acquire 97 CMIP5 projections from the Colorado River Water Availability Study (CWCB, 2012). In each of these projections, monthly precipitation factor changes and temperature delta changes were computed between mean projected 2035–2065 climate statistics and mean historical climate statistics from 1950–2013. These 97 different combinations of 12 monthly precipitation multipliers and 12 monthly temperature delta shifts were applied to historical precipitation and temperature time series from 1950–2013. The resulting climate time series were run through a Variable Infiltration Capacity (VIC) model of the UCRB, resulting in 97 time series of projected future streamflows at the Colorado–Utah state line.

+

We fit an HMM to each trace of projected streamflow and get a set of corresponding HMM parameters. Then we take the ratio between these parameters and the baseline HMM parameters that we calculated earlier in the notebook in order to calculate the multipliers associated with each CMIP5 projection. This is all done externally, so we import the resulting multipliers in the next line.

+
# Read in CMIP5 and paleo multipliers
+CMIP5_multipliers = pd.read_csv('data/CMIP5_SOWs.txt', header=None, sep=" ")
+
+
+
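Conceptually, the external step amounts to fitting an HMM to each projected trace and comparing the result against the baseline fit. Below is a hedged sketch for a single projection; the projected-flow file name is hypothetical, and note that the transition terms are expressed as additive changes (matching the table above) while the means and standard deviations are expressed as ratios.

# Hedged sketch of deriving multipliers for one CMIP5 projection
logQ_proj = np.log(pd.read_csv('data/cmip5_projection_001.csv'))   # hypothetical file name
_, mus_p, sigmas_p, P_p, _, _, _ = fitmodel.fitHMM(logQ_proj, n_years)

multipliers = {
    'wet_mu': mus_p[1] / mus[1],          # ratio to baseline wet-state mean
    'dry_mu': mus_p[0] / mus[0],          # ratio to baseline dry-state mean
    'wet_std': sigmas_p[1] / sigmas[1],   # ratio to baseline wet-state standard deviation
    'dry_std': sigmas_p[0] / sigmas[0],   # ratio to baseline dry-state standard deviation
    'dry_tp': P_p[0, 0] - P[0, 0],        # additive change in dry-dry transition probability
    'wet_tp': P_p[1, 1] - P[1, 1],        # additive change in wet-wet transition probability
}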

Let’s plot a response surface that will allow us to see how combinations of HMM parameters tend to influence decadal drought. In order to get a continuous surface, we’ll fit a regression (with an interaction term) to the sampled parameter values and then predict decadal drought occurrence over a set of grid points. We fit the response surface for two parameters that should have an effect on decadal drought: the dry distribution mean and the dry-dry transition probability.

+
# Choose two parameters to fit the response surface for
+mu_dry=[i[1] for i in LHsamples]
+tp_dry=[i[4] for i in LHsamples]
+
+# Create an interpolation grid
+xgrid = np.arange(np.min(mu_dry),
+                  np.max(mu_dry),
+                  (np.max(mu_dry) - np.min(mu_dry)) / 100)
+
+ygrid = np.arange(np.min(tp_dry),
+                  np.max(tp_dry),
+                  (np.max(tp_dry) - np.min(tp_dry)) / 100)
+
+# Fit regression
+d = {'Dry_Tp': tp_dry,
+     'Dry_Mu': mu_dry,
+     'Drought_Occurrence':decadal_drought_occurence}
+
+df = pd.DataFrame(d)
+df['Intercept'] = np.ones(np.shape(df)[0])
+df['Interaction'] = df['Dry_Tp'] * df['Dry_Mu']
+cols = ['Intercept'] + ['Dry_Mu'] + ['Dry_Tp'] + ['Interaction']
+ols = sm.OLS(df['Drought_Occurrence'], df[cols])
+result = ols.fit()
+
+# Calculate drought occurence for each grid point
+X, Y = np.meshgrid(xgrid, ygrid)
+x = X.flatten()
+y = Y.flatten()
+grid = np.column_stack([np.ones(len(x)), x, y, x * y])
+z = result.predict(grid)
+z[z < 0.0] = 0.0 # replace negative drought occurrence predictions with 0
+
+
+

Let’s plot our results:

+
# Set color gradient for response surface
+drought_map = mpl.cm.get_cmap('RdBu_r')
+
+# Reshape our predicted drought occurrence and define bounds of colors
+Z = np.reshape(z, np.shape(X))
+vmin = np.min([np.min(z), np.min(df['Drought_Occurrence'].values)])
+vmax = 15
+norm = mpl.colors.Normalize(vmin, vmax)
+
+# Plot response surface and CMIP5 projections
+fig, ax = plt.subplots(figsize=(12, 8))
+ax.contourf(X, Y, Z, cmap=drought_map, norm=norm)
+ax.scatter(CMIP5_multipliers.iloc[:,7],
+           CMIP5_multipliers.iloc[:,12],
+           c='#ffffb3',
+           edgecolor='none',
+           s=30)
+cbar = ax.figure.colorbar(mpl.cm.ScalarMappable(norm=norm, cmap=drought_map), ax=ax)
+ax.set_xlim(np.nanmin(X), np.nanmax(X))
+ax.set_ylim(np.nanmin(Y), np.nanmax(Y))
+ax.set_xlabel('Dry State Mu', fontsize=14)
+ax.set_ylabel('Dry-Dry Transition Probability', fontsize=14)
+ax.tick_params(axis='both', labelsize=14)
+cbar.ax.set_ylabel('Decadal Drought Occurrence', rotation=-90, fontsize=14, labelpad=15)
+cbar.ax.tick_params(axis='y',labelsize=14)
+plt.show()
+plt.close()
+
+
+_images/hmm_53_0.png +

We see the influence of the dry state mean and dry-dry transition parameters. We’re likely to see more decadal droughts when we (1) increase the dry-dry transition probability, which inherently will increase persistence of the dry state, and (2) make the dry state log-space mean drier. Note that the CMIP5 scenarios tend to span the extent of the dry mean sample space, but are less representative of the dry transition probability sample space, which suggests that the types of hydrological droughts represented in the projections tend to only be wetter to slightly drier than our baseline. Both methods of producing these scenarios are valid, though studies have suggested that globally-resolved GCMs may be inappropriate for representing regional extremes. Ultimately, if your goal is to produce a variety of ensembles that are characterized by many different drought characteristics, you will likely find that a generator approach will serve this purpose better.

+
+
+

B.6.2.5. Tips to Create an HMM-Based Generator for your System#

+

In this tutorial, we demonstrated how to fit an HMM-based generator for a single gauge located in the Upper Colorado River Basin. In order to apply this methodology to your problem, you will need to first ask:

+
    +
  1. Is this model appropriate for my location of interest? We have applied this style of generator to locations where persistent wet and dry states are characteristic, which tends to be in the Western US. Ultimately the best way to judge if an HMM is useful for your application is to fit the model and explore the resulting distributions. Are there two (or more) distinct states that emerge? If not, then your location may not exhibit the type of persistence that an HMM-based generator is useful for. You can consider exploring other styles of generators such as the Kirsch-Nowak generator (Kirsch et al., 2013).

  2. Do I have the right datasets? We use annual data for our location of interest. In this notebook, the HMM is fit to log annual flows. Ultimately, it can be disaggregated to daily flows (using a reference historical daily dataset) to be useful in water resources operational applications. You could also disaggregate to a finer resolution than daily if the historical dataset exists.
+

If you meet these requirements, feel free to proceed through fitting the model using the code available in the notebook. Be sure to consider the appropriate number of samples to generate (in both the stationary and non-stationary cases). Make sure that you test multiple sample sizes and continue to increase your sample size until you converge to a consistent representation of extremes. What is the appropriate number of LHS samples of the parameters to use? In this experiment we used 1,000 samples of parameters, based on the extensive stability tests described in Quinn et al. (2020).
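One simple, hedged way to check convergence is to track a tail statistic (for example, a low-flow percentile) as the number of synthetic traces grows and stop adding samples once it stabilizes. The sketch below reuses the synthetic DataFrame of stationary traces loaded in the FDC sketch earlier; one column per trace is an assumption about the file layout.

# Hedged convergence check: track the 1st-percentile annual flow as the sample size grows
sample_sizes = [10, 25, 50, 100]
low_flow_estimates = []
for n in sample_sizes:
    subset = synthetic.iloc[:, :n].values.flatten()     # pool the first n traces
    low_flow_estimates.append(np.percentile(subset, 1))

fig, ax = plt.subplots(figsize=(8, 5))
ax.plot(sample_sizes, low_flow_estimates, marker='o')
ax.set_xlabel('Number of synthetic traces', fontsize=16)
ax.set_ylabel('1st-percentile annual flow', fontsize=16)
plt.show()
plt.close()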

+

Finally, to learn more about this test case, refer to Hadjimichael et al. (2020a) and Hadjimichael et al. (2020b). For another study on synthetic drought generation to support vulnerability assessments in the Research Triangle region of North Carolina, please refer to Herman et al. (2016).

+
+
+

B.6.2.6. References#

+

Ault, T. R., Cole, J. E., Overpeck, J. T., Pederson, G. T., & Meko, D. M. (2014). Assessing the risk of persistent drought using climate model simulations and paleoclimate data. Journal of Climate, 27(20), 7529-7549.

+

CWCB (2012). Colorado River Water Availability Study Phase I Report. Colorado Water Conservation Board.

+

Hadjimichael, A., Quinn, J., Wilson, E., Reed, P., Basdekas, L., Yates, D., & Garrison, M. (2020a). Defining robustness, vulnerabilities, and consequential scenarios for diverse stakeholder interests in institutionally complex river basins. Earth’s Future, 8(7), e2020EF001503.

+

Hadjimichael, A., Quinn, J., & Reed, P. (2020b). Advancing diagnostic model evaluation to better understand water shortage mechanisms in institutionally complex river basins. Water Resources Research, 56(10), e2020WR028079.

+

Herman, J. D., Zeff, H. B., Lamontagne, J. R., Reed, P. M., & Characklis, G. W. (2016). Synthetic drought scenario generation to support bottom-up water supply vulnerability assessments. Journal of Water Resources Planning and Management, 142(11), 04016050.

+

Kirsch, B. R., Characklis, G. W., & Zeff, H. B. (2013). Evaluating the impact of alternative hydro-climate scenarios on transfer agreements: Practical improvement for generating synthetic streamflows. Journal of Water Resources Planning and Management, 139(4), 396-406.

+

Overpeck, J. T., & Udall, B. (2020). Climate change and the aridification of North America. Proceedings of the National Academy of Sciences, 117(22), 11856-11858.

+

Quinn, J. D., Hadjimichael, A., Reed, P. M., & Steinschneider, S. (2020). Can exploratory modeling of water scarcity vulnerabilities and robustness be scenario neutral? Earth’s Future, 8, e2020EF001650. https://doi.org/10.1029/2020EF001650

+

Williams, A. P., Cook, B. I., & Smerdon, J. E. (2022). Rapid intensification of the emerging southwestern North American megadrought in 2020–2021. Nature Climate Change, 12(3), 232-234.

+
+
+
+
\ No newline at end of file
diff --git a/dev/docs/html/A3_plotting_code.html b/dev/docs/html/A3_plotting_code.html
new file mode 100644
index 0000000..0abaed1
--- /dev/null
+++ b/dev/docs/html/A3_plotting_code.html
@@ -0,0 +1,1045 @@
C. Plotting Code Samples#

+
+

C.1. hymod.ipynb#

+

The following are the plotting functions as described in the hymod.ipynb Jupyter notebook tutorial.

+

The following are the necessary package imports to run these functions:

+
import numpy as np
+import seaborn as sns
+import matplotlib.pyplot as plt
+
+from matplotlib.lines import Line2D
+
+
+
+

C.1.1. plot_observed_vs_simulated_streamflow()#

+
def plot_observed_vs_simulated_streamflow(df, hymod_dict, figsize=[12, 6]):
+    """Plot observed versus simulated streamflow.
+
+    :param df:              Dataframe of hymod input data including columns for precip, potential evapotranspiration,
+                            and streamflow
+
+    :param hymod_dict:      A dictionary of hymod outputs
+    :type hymod_dict:       dict
+
+    :param figsize:         Matplotlib figure size
+    :type figsize:          list
+
+    """
+
+    # set plot style
+    plt.style.use('seaborn-white')
+
+    # set up figure
+    fig, ax = plt.subplots(figsize=figsize)
+
+    # plot observed streamflow
+    ax.plot(range(0, len(df['Strmflw'])), df['Strmflw'], color='pink')
+
+    # plot simulated streamflow
+    ax.plot(range(0, len(df['Strmflw'])), hymod_dict['Q'], color='black')
+
+    # set axis labels
+    ax.set_ylabel('Streamflow($m^3/s$)')
+    ax.set_xlabel('Days')
+
+    # set plot title
+    plt.title('Observed vs. Simulated Streamflow')
+
+    return ax
+
+
+
+
+
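A hypothetical usage example (the column name and dictionary key follow the docstring above, but the data values are made up purely for illustration):

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

# Made-up daily data for illustration only
df = pd.DataFrame({'Strmflw': np.random.rand(365) * 10})
hymod_dict = {'Q': np.random.rand(365) * 10}

ax = plot_observed_vs_simulated_streamflow(df, hymod_dict)
plt.show()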

C.1.2. plot_observed_vs_sensitivity_streamflow()#

+
def plot_observed_vs_sensitivity_streamflow(df_obs, df_sim, figsize=[10, 4]):
+    """Plot observed streamflow versus simulations generated from sensitivity analysis.
+
+    :param df_obs:          Dataframe of mean monthly hymod input data including columns for precip,
+                            potential evapotranspiration, and streamflow
+
+    :param df_sim:          Dataframe of mean monthly simulation data from sensitivity analysis
+
+    :param figsize:         Matplotlib figure size
+    :type figsize:          list
+
+    """
+
+    month_list = range(len(df_sim))
+
+    # set up figure
+    fig, ax = plt.subplots(figsize=figsize)
+
+    # set labels
+    ax.set_xlabel('Days')
+    ax.set_ylabel('Flow Discharge (m^3/s)')
+
+    # plots all simulated streamflow cases under different sample sets
+    for i in df_sim.columns:
+        plt.plot(month_list, df_sim[i], color="pink", alpha=0.2)
+
+    # plot observed streamflow
+    plt.plot(month_list, df_obs['Strmflw'], color="black")
+
+    plt.title('Observed vs. Sensitivity Analysis Outputs')
+
+    return ax
+
+
+
+
+

C.1.3. plot_monthly_heatmap()#

+
def plot_monthly_heatmap(arr_sim, df_obs, title='', figsize=[14, 6]):
+    """Plot a sensitivity metric overlain by observed flow.
+
+    :param arr_sim:         Numpy array of simulated metrics
+
+    :param df_obs:          Dataframe of mean monthly observed data from sensitivity analysis
+
+    :param title:           Title of plot
+    :type title:            str
+
+    :param figsize:         Matplotlib figure size
+    :type figsize:          list
+
+    """
+
+    # set up figure
+    fig, ax = plt.subplots(figsize=figsize)
+
+    # plot heatmap
+    sns.heatmap(arr_sim,
+                ax=ax,
+                yticklabels=['Kq', 'Ks', 'Alp', 'Huz', 'B'],
+                cmap=sns.color_palette("ch:s=-.2,r=.6"))
+
+    # setup overlay axis
+    ax2 = ax.twinx()
+
+    # plot line
+    ax2.plot(np.arange(0.5, 12.5), df_obs['Strmflw'], color='slateblue')
+
+    # plot points on line
+    ax2.plot(np.arange(0.5, 12.5), df_obs['Strmflw'], color='slateblue', marker='o')
+
+    # set axis limits and labels
+    ax.set_ylim(0, 5)
+    ax.set_xlim(0, 12)
+    ax.set_xticklabels(['jan', 'feb', 'mar', 'apr', 'may', 'jun', 'jul', 'aug', 'sep', 'oct', 'nov', 'dec'])
+    ax2.set_ylabel('Flow Discharge($m^3/s$)')
+
+    plt.title(title)
+
+    plt.show()
+
+    return ax, ax2
+
+
+
+
+

C.1.4. plot_annual_heatmap()#

+
def plot_annual_heatmap(arr_sim, df_obs, title='', figsize=[14,5]):
+    """Plot a sensitivity metric overlain by observed flow..
+
+    :param arr_sim:         Numpy array of simulated metrics
+
+    :param df_obs:          Dataframe of mean monthly observed data from sensitivity analysis
+
+    :param title:           Title of plot
+    :type title:            str
+
+    :param figsize:         Matplotlib figure size
+    :type figsize:          list
+
+    """
+
+    # set up figure
+    fig, ax = plt.subplots(figsize=figsize)
+
+    # plot heatmap
+    sns.heatmap(arr_sim, ax=ax, cmap=sns.color_palette("YlOrBr"))
+
+    # setup overlay axis
+    ax2 = ax.twinx()
+
+    # plot line
+    ax2.plot(np.arange(0.5, 10.5), df_obs['Strmflw'], color='slateblue')
+
+    # plot points on line
+    ax2.plot(np.arange(0.5, 10.5), df_obs['Strmflw'], color='slateblue', marker='o')
+
+    # set up axis labels and limits
+    ax.set_ylim(0, 5)
+    ax.set_xlim(0, 10)
+    ax.set_yticklabels(['Kq', 'Ks', 'Alp', 'Huz', 'B'])
+    ax.set_xticklabels(range(2000, 2010))
+    ax2.set_ylabel('Flow Discharge($m^3/s$)')
+
+    plt.title(title)
+
+    return ax, ax2
+
+
+
+
+

C.1.5. plot_varying_heatmap()#

+
def plot_varying_heatmap(arr_sim, df_obs, title='', figsize=[14,5]):
+    """Plot a sensitivity metric overlain by observed flow..
+
+    :param arr_sim:         Numpy array of simulated metrics
+
+    :param df_obs:          Dataframe of mean monthly observed data from sensitivity analysis
+
+    :param title:           Title of plot
+    :type title:            str
+
+    :param figsize:         Matplotlib figure size
+    :type figsize:          list
+
+    """
+
+    # set up figure
+    fig, ax = plt.subplots(figsize=figsize)
+
+    # plot heatmap
+    sns.heatmap(arr_sim,
+                ax=ax,
+                yticklabels=['Kq', 'Ks', 'Alp', 'Huz', 'B'],
+                cmap=sns.light_palette("seagreen", as_cmap=True))
+
+    n_years = df_obs.shape[0]
+
+    # setup overlay axis
+    ax2 = ax.twinx()
+
+    # plot line
+    ax2.plot(range(0, n_years), df_obs['Strmflw'], color='slateblue')
+
+    # plot points on line
+    ax2.plot(range(0, n_years), df_obs['Strmflw'], color='slateblue', marker='o')
+
+    # set up axis labels and limits
+    ax.set_ylim(0, 5)
+    ax.set_xlim(-0.5, 119.5)
+    ax2.set_ylabel('Flow Discharge')
+    ax.set_xlabel('Number of Months')
+
+    plt.title(title)
+
+    return ax, ax2
+
+
+
+
+

C.1.6. plot_precalibration_flow()#

+
def plot_precalibration_flow(df_sim, df_obs, figsize=[10, 4]):
+    """Plot flow discharge provided by the ensemble of parameters sets from Pre-Calibration versus the observed
+    flow data.
+
+    :param df_sim:          Dataframe of simulated metrics
+
+    :param df_obs:          Dataframe of mean monthly observed data from sensitivity analysis
+
+    :param figsize:         Matplotlib figure size
+    :type figsize:          list
+
+    """
+
+    # set up figure
+    fig, ax = plt.subplots(figsize=figsize)
+
+    # set axis labels
+    ax.set_xlabel('Days')
+    ax.set_ylabel('Flow Discharge')
+
+    # plot pre-calibration results
+    for i in range(df_sim.shape[1]):
+        plt.plot(range(len(df_sim)), df_sim.iloc[:, i],  color="lightgreen", alpha=0.2)
+
+    # plot observed
+    plt.plot(range(len(df_sim)), df_obs['Strmflw'],  color="black")
+
+    plt.title('Observed vs. Pre-Calibration Outputs')
+
+    # customize legend
+    custom_lines = [Line2D([0], [0],  color="lightgreen", lw=4),
+                    Line2D([0], [0], color="black", lw=4)]
+    plt.legend(custom_lines, ['Pre-Calibration', 'Observed'])
+
+    return ax
+
+
+
+
+

C.1.7. plot_precalibration_glue()#

+
def plot_precalibration_glue(df_precal, df_glue, df_obs, figsize=[10, 4]):
+    """Plot flow discharge provided by the ensemble of parameters sets from Pre-Calibration versus the observed
+    flow data.
+
+    :param df_sim:          Dataframe of simulated metrics
+
+    :param df_obs:          Dataframe of mean monthly observed data from sensitivity analysis
+
+    :param figsize:         Matplotlib figure size
+    :type figsize:          list
+
+    """
+
+    # set up figure
+    fig, ax = plt.subplots(figsize=figsize)
+
+    # set axis labels
+    ax.set_xlabel('Days')
+    ax.set_ylabel('Flow Discharge')
+
+    # plot pre-calibration results
+    for i in range(df_precal.shape[1]):
+        plt.plot(range(len(df_precal)), df_precal.iloc[:, i],  color="lightgreen", alpha=0.2)
+
+    # plot glue
+    for i in range(df_glue.shape[1]):
+        plt.plot(range(len(df_glue)), df_glue.iloc[:, i], color="lightblue", alpha=0.2)
+
+    # plot observed
+    plt.plot(range(len(df_precal)), df_obs['Strmflw'],  color="black")
+
+    plt.title('Observed vs. Sensitivity Analysis Outputs across GLUE/Pre-Calibration')
+
+    # customize legend
+    custom_lines = [Line2D([0], [0],  color="lightgreen", lw=4),
+                    Line2D([0], [0], color="lightblue", lw=4),
+                    Line2D([0], [0], color="black", lw=4)]
+    plt.legend(custom_lines, ['Pre-Calibration', 'GLUE', 'Observed'])
+
+    return ax
+
+
+
+
+
+

C.2. fishery_dynamics.ipynb#

+

The following are the plotting functions as described in the fishery_dynamics.ipynb Jupyter notebook tutorial.

+

The following are the necessary package imports to run these functions:

+
import numpy as np
+import matplotlib.pyplot as plt
+
+from matplotlib import patheffects as pe
+
+
+
+

C.2.1. plot_objective_performance()#

+
def plot_objective_performance(objective_performance, profit_solution, robust_solution, figsize=(18, 9)):
+    """Plot the identified solutions with regards to their objective performance
+    in a parallel axis plot
+
+    :param objective_performance:               Objective performance array
+    :param profit_solution:                     Profitable solutions array
+    :param robust_solution:                     Robust solutions array
+    :param figsize:                             Figure size
+    :type figsize:                              tuple
+
+    """
+
+    # create the figure object
+    fig = plt.figure(figsize=figsize)
+
+    # set up subplot axis object
+    ax = fig.add_subplot(1, 1, 1)
+
+    # labels where constraint is always 0
+    objs_labels = ['Net present\nvalue (NPV)',
+                   'Prey population deficit',
+                   'Longest duration\nof low harvest',
+                   'Worst harvest instance',
+                   'Variance of harvest',
+                   'Duration of predator\npopulation collapse']
+
+    # normalization across objectives
+    mins = objective_performance.min(axis=0)
+    maxs = objective_performance.max(axis=0)
+    norm_reference = objective_performance.copy()
+
+    for i in range(5):
+        mm = objective_performance[:, i].min()
+        mx = objective_performance[:, i].max()
+        if mm != mx:
+            norm_reference[:, i] = (objective_performance[:, i] - mm) / (mx - mm)
+        else:
+            norm_reference[:, i] = 1
+
+    # colormap from matplotlib
+    cmap = plt.cm.get_cmap("Blues")
+
+    # plot all solutions
+    for i in range(len(norm_reference[:, 0])):
+        ys = np.append(norm_reference[i, :], 1.0)
+        xs = range(len(ys))
+        ax.plot(xs, ys, c=cmap(ys[0]), linewidth=2)
+
+    # to highlight robust solutions
+    ys = np.append(norm_reference[profit_solution, :], 1.0)  # Most profitable
+    xs = range(len(ys))
+    l1 = ax.plot(xs[0:6],
+                 ys[0:6],
+                 c=cmap(ys[0]),
+                 linewidth=3,
+                 label='Most robust in NPV',
+                 path_effects=[pe.Stroke(linewidth=6, foreground='darkgoldenrod'), pe.Normal()])
+
+    ys = np.append(norm_reference[robust_solution, :], 1.0)  # Most robust in all criteria
+    xs = range(len(ys))
+    l2 = ax.plot(xs[0:6],
+                 ys[0:6],
+                 c=cmap(ys[0]),
+                 linewidth=3,
+                 label='Most robust across criteria',
+                 path_effects=[pe.Stroke(linewidth=6, foreground='gold'), pe.Normal()])
+
+    # build colorbar
+    sm = plt.cm.ScalarMappable(cmap=cmap)
+    sm.set_array([objective_performance[:, 0].min(), objective_performance[:, 0].max()])
+    cbar = fig.colorbar(sm)
+    cbar.ax.set_ylabel("\nNet present value (NPV)")
+
+    # tick values
+    minvalues = ["{0:.3f}".format(mins[0]),
+                 "{0:.3f}".format(-mins[1]),
+                 str(-mins[2]),
+                 "{0:.3f}".format(-mins[3]),
+                 "{0:.2f}".format(-mins[4]),
+                 str(0)]
+
+    maxvalues = ["{0:.2f}".format(maxs[0]),
+                 "{0:.3f}".format(-maxs[1]),
+                 str(-maxs[2]),
+                 "{0:.2f}".format(maxs[3]),
+                 "{0:.2f}".format(-maxs[4]),
+                 str(0)]
+
+    ax.set_ylabel("Preference ->", size=12)
+    ax.set_yticks([])
+    ax.set_xticks([0, 1, 2, 3, 4, 5])
+    ax.set_xticklabels([minvalues[i] + '\n' + objs_labels[i] for i in range(len(objs_labels))])
+
+    # make a twin axis for toplabels
+    ax1 = ax.twiny()
+    ax1.set_yticks([])
+    ax1.set_xticks([0, 1, 2, 3, 4, 5])
+    ax1.set_xticklabels([maxvalues[i] for i in range(len(maxs) + 1)])
+
+    return ax, ax1
+
+
+
+
+

C.2.2. plot_factor_performance()#

+
def plot_factor_performance(param_values, collapse_days, b, m, a):
+    """Visualize the performance of our policies in three-dimensional
+    parametric space.
+
+    :param param_values:                Saltelli sample array
+    :param collapse_days:               Simulation array
+    :param b:                           b parameter boundary interval
+    :param m:                           m parameter boundary interval
+    :param a:                           a parameter boundary interval
+
+    """
+
+    # set colormap
+    cmap = plt.cm.get_cmap("RdBu_r")
+
+    # build figure object
+    fig = plt.figure(figsize=plt.figaspect(0.5), dpi=600, constrained_layout=True)
+
+    # set up scalable colormap
+    sm = plt.cm.ScalarMappable(cmap=cmap)
+
+    # set up subplot for profit maximizing policy
+    ax1 = fig.add_subplot(1, 2, 1, projection='3d')
+
+    # add point data for profit plot
+    sows = ax1.scatter(param_values[:,1],
+                       param_values[:,6],
+                       param_values[:,0],
+                       c=collapse_days[:,0],
+                       cmap=cmap,
+                       s=0.5)
+
+    # add surface data for boundary separating successful and failed states of the world
+    pts_ineq = ax1.plot_surface(b, m, a, color='black', alpha=0.25, zorder=1)
+
+    # add reference point to plot
+    pt_ref = ax1.scatter(0.5, 0.7, 0.005, c='black', s=50, zorder=0)
+
+    # set up plot aesthetics and labels
+    ax1.set_xlabel("b")
+    ax1.set_ylabel("m")
+    ax1.set_zlabel("a")
+    ax1.set_zlim([0.0, 2.0])
+    ax1.set_xlim([0.0, 1.0])
+    ax1.set_ylim([0.0, 1.5])
+    ax1.xaxis.set_view_interval(0,  0.5)
+    ax1.set_facecolor('white')
+    ax1.view_init(12, -17)
+    ax1.set_title('Profit maximizing policy')
+
+    # set up subplot for robust policy
+    ax2 = fig.add_subplot(1, 2, 2, projection='3d')
+
+    # add point data for robust plot
+    sows = ax2.scatter(param_values[:,1],
+                       param_values[:,6],
+                       param_values[:,0],
+                       c=collapse_days[:,1],
+                       cmap=cmap,
+                       s=0.5)
+
+    # add surface data for boundary separating successful and failed states of the world
+    pts_ineq = ax2.plot_surface(b, m, a, color='black', alpha=0.25, zorder=1)
+
+    # add reference point to plot
+    pt_ref = ax2.scatter(0.5, 0.7, 0.005, c='black', s=50, zorder=0)
+
+    # set up plot aesthetics and labels
+    ax2.set_xlabel("b")
+    ax2.set_ylabel("m")
+    ax2.set_zlabel("a")
+    ax2.set_zlim([0.0, 2.0])
+    ax2.set_xlim([0.0, 1.0])
+    ax2.set_ylim([0.0, 1.5])
+    ax2.xaxis.set_view_interval(0, 0.5)
+    ax2.set_facecolor('white')
+    ax2.view_init(12, -17)
+    ax2.set_title('Robust policy')
+
+    # set up colorbar
+    sm.set_array([collapse_days.min(), collapse_days.max()])
+    cbar = fig.colorbar(sm)
+    cbar.set_label('Days with predator collapse')
+
+    return ax1, ax2
+
+
+
+
+
\ No newline at end of file
diff --git a/dev/docs/html/R.Bibliography.html b/dev/docs/html/R.Bibliography.html
new file mode 100644
index 0000000..bf691ff
--- /dev/null
+++ b/dev/docs/html/R.Bibliography.html
@@ -0,0 +1,1264 @@
Bibliography#

+
+
+
+[1] +

G. E. P. Box. Science and Statistics. Journal of the American Statistical Association, 71(356):791–799, 1976. URL: https://www.tandfonline.com/doi/abs/10.1080/01621459.1976.10480949, doi:10.1080/01621459.1976.10480949.

+
+
+[2] +

National Research Council and others. Convergence: Facilitating transdisciplinary integration of life sciences, physical sciences, engineering, and beyond. National Academies Press, 2014.

+
+
+[3] +

Sondoss Elsawah, Tatiana Filatova, Anthony J Jakeman, Albert J Kettner, Moira L Zellner, Ioannis N Athanasiadis, Serena H Hamilton, Robert L Axtell, Daniel G Brown, Jonathan M Gilligan, and others. Eight grand challenges in socio-environmental systems modeling. Socio-Environmental Systems Modelling, 2:16226–16226, 2020.

+
+
+[4] +

Yacov Y Haimes. Risk modeling of interdependent complex systems of systems: theory and practice. Risk analysis, 38(1):84–98, 2018.

+
+
+[5] +

Dirk Helbing. Globally networked risks and how to respond. Nature, 497(7447):51–59, 2013.

+
+
+[6] +

Andrea Saltelli, Ksenia Aleksankina, William Becker, Pamela Fennell, Federico Ferretti, Niels Holst, Sushan Li, and Qiongli Wu. Why so many published sensitivity analyses are false: a systematic review of sensitivity analysis practices. Environmental modelling & software, 114:29–39, 2019.

+
+
+[7] +

Daniel Wirtz and Wolfgang Nowak. The rocky road to extended simulation frameworks covering uncertainty, inversion, optimization and control. Environmental Modelling & Software, 93:180–192, 2017.

+
+
+[8] +

Roger Cooke and others. Experts in uncertainty: opinion and subjective probability in science. Oxford University Press on Demand, 1991.

+
+
+[9] +

Enayat A Moallemi, Jan Kwakkel, Fjalar J de Haan, and Brett A Bryan. Exploratory modeling for analyzing coupled human-natural systems under uncertainty. Global Environmental Change, 65:102186, 2020.

+
+
+[10] +

Warren E Walker, Poul Harremoës, Jan Rotmans, Jeroen P Van Der Sluijs, Marjolein BA Van Asselt, Peter Janssen, and Martin P Krayer von Krauss. Defining uncertainty: a conceptual basis for uncertainty management in model-based decision support. Integrated assessment, 4(1):5–17, 2003.

+
+
+[11] +

Andrea Saltelli, Philip B Stark, William Becker, and Pawel Stano. Climate models as economic guides scientific challenge or quixotic quest? Issues in Science and Technology, 31(3):79–84, 2015.

+
+
+[12] +

Hoshin V. Gupta, Thorsten Wagener, and Yuqiong Liu. Reconciling theory with observations: elements of a diagnostic approach to model evaluation. Hydrological Processes: An International Journal, 22(18):3802–3813, 2008. Publisher: Wiley Online Library.

+
+
+[13] +

Antonia Hadjimichael, Julianne Quinn, and Patrick Reed. Advancing Diagnostic Model Evaluation to Better Understand Water Shortage Mechanisms in Institutionally Complex River Basins. Water Resources Research, 56(10):e2020WR028079, 2020. URL: http://agupubs.onlinelibrary.wiley.com/doi/abs/10.1029/2020WR028079 (visited on 2020-10-16), doi:10.1029/2020WR028079.

+
+
+[14] +

Andrea Saltelli, Marco Ratto, Terry Andres, Francesca Campolongo, Jessica Cariboni, Debora Gatelli, Michaela Saisana, and Stefano Tarantola. Global Sensitivity Analysis: The Primer. Wiley-Interscience, Chichester, England ; Hoboken, NJ, 1 edition edition, February 2008. ISBN 978-0-470-05997-5.

+
+
+[15] +

Keith Beven. Towards a coherent philosophy for modelling the environment. Proceedings of the royal society of London. Series A: mathematical, physical and engineering sciences, 458(2026):2465–2484, 2002.

+
+
+[16] +

Naomi Oreskes, Kristin Shrader-Frechette, and Kenneth Belitz. Verification, Validation, and Confirmation of Numerical Models in the Earth Sciences. Science, 263(5147):641–646, February 1994. URL: https://science.sciencemag.org/content/263/5147/641 (visited on 2020-04-15), doi:10.1126/science.263.5147.641.

+
+
+[17] +

Keith Beven. Prophecy, reality and uncertainty in distributed hydrological modelling. Advances in water resources, 16(1):41–51, 1993.

+
+
+[18] +

Keith Beven and Andrew Binley. The future of distributed models: Model calibration and uncertainty prediction. Hydrological Processes, 6(3):279–298, 1992. doi:10.1002/hyp.3360060305.

+
+
+[19] +

Yaman Barlas and Stanley Carpenter. Philosophical roots of model validation: Two paradigms. System Dynamics Review, 6(2):148–166, 1990. doi:10.1002/sdr.4260060203.

+
+
+[20] +

Stephen Toulmin. From form to function: philosophy and history of science in the 1950s and now. Daedalus, pages 143–162, 1977. Publisher: JSTOR.

+
+
+[21] +

George B. Kleindorfer, Liam O'Neill, and Ram Ganeshan. Validation in simulation: Various positions in the philosophy of science. Management Science, 44(8):1087–1099, 1998. Publisher: INFORMS.

+
+
+[22] +

Sibel Eker, Elena Rovenskaya, Michael Obersteiner, and Simon Langan. Practice and perspectives in the validation of resource management models. Nature communications, 9(1):1–10, 2018.

+
+
+[23] +

Yaman Barlas. Formal aspects of model validity and validation in system dynamics. System Dynamics Review: The Journal of the System Dynamics Society, 12(3):183–210, 1996. Publisher: Wiley Online Library.

+
+
+[24] +

Thomas H. Naylor and Joseph Michael Finger. Verification of computer simulation models. Management science, 14(2):B–92, 1967. Publisher: INFORMS.

+
+
+[25] +

Keith J Beven. On hypothesis testing in hydrology: why falsification of models is still a really good idea. Wiley Interdisciplinary Reviews: Water, 5(3):e1278, 2018.

+
+
+[26] +

Hoshin V Gupta, Martyn P Clark, Jasper A Vrugt, Gab Abramowitz, and Ming Ye. Towards a comprehensive assessment of model structural adequacy. Water Resources Research, 2012.

+
+
+[27] +

Praveen Kumar. Typology of hydrologic predictability. Water Resources Research, 2011. URL: https://agupubs.onlinelibrary.wiley.com/doi/abs/10.1029/2010WR009769 (visited on 2020-04-15), doi:10.1029/2010WR009769.

+
+
+[28] +

Grey S. Nearing, Benjamin L. Ruddell, Andrew R. Bennett, Cristina Prieto, and Hoshin V. Gupta. Does Information Theory Provide a New Paradigm for Earth Science? Hypothesis Testing. Water Resources Research, 56(2):e2019WR024918, 2020. doi:10.1029/2019WR024918.

+
+
+[29] +

Hoshin Vijai Gupta, Soroosh Sorooshian, and Patrice Ogou Yapo. Toward improved calibration of hydrologic models: Multiple and noncommensurable measures of information. Water Resources Research, 34(4):751–763, 1998. URL: http://agupubs.onlinelibrary.wiley.com/doi/abs/10.1029/97WR03495 (visited on 2020-04-07), doi:10.1029/97WR03495.

+
+
+[30] +

Francesca Pianosi and Thorsten Wagener. Understanding the time-varying importance of different uncertainty sources in hydrological modelling using global sensitivity analysis. Hydrological Processes, pages 3991–4003, November 2017. URL: https://onlinelibrary.wiley.com/doi/abs/10.1002/hyp.10968%4010.1111/%28ISSN%291099-1085.Kieth-Beven, doi:10.1002/hyp.10968@10.1111/(ISSN)1099-1085.Kieth-Beven.

+
+
+[31] +

Charles Rougé, Patrick M. Reed, Danielle S. Grogan, Shan Zuidema, Alexander Prusevich, Stanley Glidden, Jonathan R. Lamontagne, and Richard B. Lammers. Coordination and Control: Limits in Standard Representations of Multi-Reservoir Operations in Hydrological Modeling. Hydrology and Earth System Sciences Discussions, pages 1–37, November 2019. URL: https://www.hydrol-earth-syst-sci-discuss.net/hess-2019-589/, doi:https://doi.org/10.5194/hess-2019-589.

+
+
+[32] +

David W. Cash, William C. Clark, Frank Alcock, Nancy M. Dickson, Noelle Eckley, David H. Guston, Jill Jäger, and Ronald B. Mitchell. Knowledge systems for sustainable development. Proceedings of the national academy of sciences, 100(14):8086–8091, 2003. Publisher: National Acad Sciences.

+
+
+[33] +

Dave D. White, Amber Wutich, Kelli L. Larson, Patricia Gober, Timothy Lant, and Clea Senneville. Credibility, salience, and legitimacy of boundary objects: water managers' assessment of a simulation model in an immersive decision theater. Science and Public Policy, 37(3):219–232, April 2010. Publisher: Oxford Academic. URL: https://academic.oup.com/spp/article/37/3/219/1626552 (visited on 2020-05-12), doi:10.3152/030234210X497726.

+
+
+[34] +

Andrea Saltelli and Silvio Funtowicz. When all models are wrong. Issues in Science and Technology, 30(2):79–85, 2014. Publisher: JSTOR.

+
+
+[35] +

Thorsten Wagener and Francesca Pianosi. What has Global Sensitivity Analysis ever done for us? A systematic review to support scientific advancement and to inform policy-making in earth system modelling. Earth-Science Reviews, 194:1–18, July 2019. URL: https://www.sciencedirect.com/science/article/pii/S0012825218300990 (visited on 2021-08-30), doi:10.1016/j.earscirev.2019.04.006.

+
+
+[36] +

Steve Bankes. Exploratory Modeling for Policy Analysis. Operations Research, 41(3):435–449, June 1993. URL: https://pubsonline.informs.org/doi/abs/10.1287/opre.41.3.435 (visited on 2018-09-11), doi:10.1287/opre.41.3.435.

+
+
+[37] +

Christopher P. Weaver, Robert J. Lempert, Casey Brown, John A. Hall, David Revell, and Daniel Sarewitz. Improving the contribution of climate model information to decision making: the value and demands of robust decision frameworks. Wiley Interdisciplinary Reviews: Climate Change, 4(1):39–60, 2013. URL: https://onlinelibrary.wiley.com/doi/abs/10.1002/wcc.202 (visited on 2019-10-01), doi:10.1002/wcc.202.

+
+
+[38] +

Andrea Saltelli, Stefano Tarantola, Francesca Campolongo, and Marco Ratto. Sensitivity analysis in practice: a guide to assessing scientific models. Volume 1. Wiley Online Library, 2004.

+
+
+[39] +

Emanuele Borgonovo and Elmar Plischke. Sensitivity analysis: a review of recent advances. European Journal of Operational Research, 248(3):869–887, 2016.

+
+
+[40] +

O Rakovec, Mary C Hill, MP Clark, AH Weerts, AJ Teuling, and R Uijlenhoet. Distributed evaluation of local sensitivity analysis (delsa), with application to hydrologic models. Water Resources Research, 50(1):409–426, 2014.

+
+
+[41] +

Andrea Saltelli and Paola Annoni. How to avoid a perfunctory sensitivity analysis. Environmental Modelling & Software, 25(12):1508–1517, 2010.

+
+
+[42] +

Yong Tang, Patrick Reed, Thibaut Wagener, and K van Werkhoven. Comparing sensitivity analysis methods to advance lumped watershed model identification and evaluation. Hydrology and Earth System Sciences, 11(2):793–817, 2007.

+
+
+[43] +

Nicholas AS Hamm, Jim W Hall, and MG Anderson. Variance-based sensitivity analysis of the probability of hydrologically induced slope instability. Computers & geosciences, 32(6):803–817, 2006.

+
+
+[44] +

Andrea Saltelli, Ksenia Aleksankina, William Becker, Pamela Fennell, Federico Ferretti, Niels Holst, Sushan Li, and Qiongli Wu. Why so many published sensitivity analyses are false: a systematic review of sensitivity analysis practices. Environmental modelling & software, 114:29–39, 2019.

+
+
+[45] +

Benjamin P Bryant and Robert J Lempert. Thinking inside the box: a participatory, computer-assisted approach to scenario discovery. Technological Forecasting and Social Change, 77(1):34–49, 2010.

+
+
+[46] +

Andrea Saltelli and Stefano Tarantola. On the relative importance of input factors in mathematical models: safety assessment for nuclear waste disposal. Journal of the American Statistical Association, 97(459):702–709, 2002.

+
+
+[47] +

Barry Anderson, Emanuele Borgonovo, Marzio Galeotti, and Roberto Roson. Uncertainty in climate change modeling: can global sensitivity analysis be of help? Risk analysis, 34(2):271–293, 2014.

+
+
+[48] +

Emanuele Borgonovo. Sensitivity analysis with finite changes: an application to modified eoq models. European Journal of Operational Research, 200(1):127–138, 2010.

+
+
+[49] +

Andrea Saltelli, Marco Ratto, Terry Andres, Francesca Campolongo, Jessica Cariboni, Debora Gatelli, Michaela Saisana, and Stefano Tarantola. Global sensitivity analysis: the primer. John Wiley & Sons, 2008.

+
+
+[50] +

Neil R Edwards, David Cameron, and Jonathan Rougier. Precalibrating an intermediate complexity climate model. Climate dynamics, 37(7):1469–1482, 2011.

+
+
+[51] +

Francesca Pianosi, Keith Beven, Jim Freer, Jim W Hall, Jonathan Rougier, David B Stephenson, and Thorsten Wagener. Sensitivity analysis of environmental models: a systematic review with practical workflow. Environmental Modelling & Software, 79:214–232, 2016.

+
+
+[52] +

RC Spear and GM Hornberger. Eutrophication in peel inlet—ii. identification of critical uncertainties via generalized sensitivity analysis. Water research, 14(1):43–49, 1980.

+
+
+[53] +

Jon C Helton, Jay Dean Johnson, Cedric J Sallaberry, and Curt B Storlie. Survey of sampling-based methods for uncertainty and sensitivity analysis. Reliability Engineering & System Safety, 91(10-11):1175–1209, 2006.

+
+
+[54] +

Ronald Aylmer Fisher. Design of experiments. Br Med J, 1(3923):554–554, 1936.

+
+
+[55] +

JD Herman, PM Reed, and T Wagener. Time-varying sensitivity analysis clarifies the effects of watershed model formulation on model behavior. Water Resources Research, 49(3):1400–1414, 2013.

+
+
+[56] +

Carolina Massmann, Thorsten Wagener, and Hubert Holzmann. A new approach to visualizing time-varying sensitivity indices for environmental model diagnostics across evaluation time-scales. Environmental modelling & software, 51:190–194, 2014.

+
+
+[57] +

An Van Schepdael, Aurélie Carlier, and Liesbet Geris. Sensitivity analysis by design of experiments. In Uncertainty in Biology, pages 327–366. Springer, 2016.

+
+
+[58] +

Nicholas Metropolis and Stanislaw Ulam. The monte carlo method. Journal of the American statistical association, 44(247):335–341, 1949.

+
+
+[59] +

John Norton. An introduction to sensitivity assessment of simulation models. Environmental Modelling & Software, 69:166–174, 2015.

+
+
+[60] +

Douglas C Montgomery. Design and analysis of experiments. John wiley & sons, 2017.

+
+
+[61] +

George EP Box and J Stuart Hunter. The 2 k—p fractional factorial designs. Technometrics, 3(3):311–351, 1961.

+
+
+[62] +

Izabella Surowiec, Ludvig Vikstrom, Gustaf Hector, Erik Johansson, Conny Vikstrom, and Johan Trygg. Generalized subset designs in analytical chemistry. Analytical chemistry, 89(12):6491–6497, 2017.

[63]
Michael D McKay, Richard J Beckman, and William J Conover. A comparison of three methods for selecting values of input variables in the analysis of output from a computer code. Technometrics, 21(2):239–245, 1979.

[64]
Boxin Tang. Orthogonal array-based latin hypercubes. Journal of the American statistical association, 88(424):1392–1397, 1993.

[65]
Ishaan L Dalal, Deian Stefan, and Jared Harwayne-Gidansky. Low discrepancy sequences for monte carlo simulations on reconfigurable platforms. In 2008 International Conference on Application-Specific Systems, Architectures and Processors, 108–113. IEEE, 2008.

[66]
SK Zaremba. The mathematical basis of monte carlo and quasi-monte carlo methods. SIAM review, 10(3):303–314, 1968.

[67]
Sergei Kucherenko, Daniel Albrecht, and Andrea Saltelli. Exploring multi-dimensional spaces: a comparison of latin hypercube and quasi monte carlo sampling techniques. arXiv preprint arXiv:1505.02350, 2015.

[68]
Bertrand Iooss, Loïc Boussouf, Vincent Feuillard, and Amandine Marrel. Numerical studies of the metamodel fitting and validation processes. arXiv preprint arXiv:1001.1049, 2010.

[69]
Ruichen Jin, Wei Chen, and Agus Sudjianto. An efficient algorithm for constructing optimal design of computer experiments. In International Design Engineering Technical Conferences and Computers and Information in Engineering Conference, volume 37009, 545–554. 2003.

[70]
Max D Morris and Toby J Mitchell. Exploratory designs for computational experiments. Journal of statistical planning and inference, 43(3):381–402, 1995.

[71]
Jeong-Soo Park. Optimal latin-hypercube designs for computer experiments. Journal of statistical planning and inference, 39(1):95–111, 1994.

[72]
Ilya M Sobol. Uniformly distributed sequences with an additional uniform property. USSR Computational Mathematics and Mathematical Physics, 16(5):236–242, 1976.

[73]
Il'ya Meerovich Sobol'. On the distribution of points in a cube and the approximate evaluation of integrals. Zhurnal Vychislitel'noi Matematiki i Matematicheskoi Fiziki, 7(4):784–802, 1967.

[74]
Max D Morris. Factorial sampling plans for preliminary computational experiments. Technometrics, 33(2):161–174, 1991.

[75]
RI Cukier, CM Fortuin, Kurt E Shuler, AG Petschek, and JH Schaibly. Study of the sensitivity of coupled reaction systems to uncertainties in rate coefficients. I. Theory. The Journal of Chemical Physics, 59(8):3873–3878, 1973.

[76]
Andrea Saltelli, Stefano Tarantola, and KP-S Chan. A quantitative model-independent method for global sensitivity analysis of model output. Technometrics, 41(1):39–56, 1999.

[77]
Ilya M Sobol. Global sensitivity indices for nonlinear mathematical models and their monte carlo estimates. Mathematics and computers in simulation, 55(1-3):271–280, 2001.

[78]
Jonathan D Herman, Harrison B Zeff, Jonathan R Lamontagne, Patrick M Reed, and Gregory W Characklis. Synthetic drought scenario generation to support bottom-up water supply vulnerability assessments. Journal of Water Resources Planning and Management, 142(11):04016050, 2016.

[79]
PCD Milly, Julio Betancourt, Malin Falkenmark, Robert M Hirsch, Zbigniew W Kundzewicz, Dennis P Lettenmaier, and Ronald J Stouffer. Stationarity is dead: whither water management? Science, 319(5863):573–574, 2008.

[80]
Edoardo Borgomeo, Christopher L Farmer, and Jim W Hall. Numerical rivers: a synthetic streamflow generator for water resources vulnerability assessments. Water Resources Research, 51(7):5382–5405, 2015.

[81]
Manuel Herrera, Sukumar Natarajan, David A Coley, Tristan Kershaw, Alfonso P Ramallo-González, Matthew Eames, Daniel Fosas, and Michael Wood. A review of current and future weather data for building simulation. Building Services Engineering Research and Technology, 38(5):602–627, 2017.

[82]
Daniel S Wilks and Robert L Wilby. The weather generation game: a review of stochastic weather models. Progress in physical geography, 23(3):329–357, 1999.

[83]
JR Lamontagne and JR Stedinger. Generating synthetic streamflow forecasts with specified precision. Journal of Water Resources Planning and Management, 144(4):04018007, 2018.

[84]
Sanghamitra Medda and Kalyan Kumar Bhar. Comparison of single-site and multi-site stochastic models for streamflow generation. Applied Water Science, 9(3):67, 2019.

[85]
Brian R Kirsch, Gregory W Characklis, and Harrison B Zeff. Evaluating the impact of alternative hydro-climate scenarios on transfer agreements: practical improvement for generating synthetic streamflows. Journal of Water Resources Planning and Management, 139(4):396–406, 2013.

[86]
Daniel P Loucks and Eelco Van Beek. Water resource systems planning and management: An introduction to methods, models, and applications. Springer, 2017.

[87]
Scott Steinschneider, Sungwook Wi, and Casey Brown. The integrated effects of climate and hydrologic uncertainty on future flood risk assessments. Hydrological Processes, 29(12):2823–2839, 2015.

[88]
Richard M Vogel. Stochastic watershed models for hydrologic risk management. Water Security, 1:28–35, 2017.

[89]
Richard M Vogel and Jery R Stedinger. The value of stochastic streamflow models in overyear reservoir design applications. Water Resources Research, 24(9):1483–1490, 1988.

[90]
Emanuele Borgonovo. Sensitivity analysis of model output with input constraints: a generalized rationale for local methods. Risk Analysis: An International Journal, 28(3):667–680, 2008.

[91]
Bertrand Iooss and Paul Lemaître. A review on global sensitivity analysis methods. In Uncertainty management in simulation-optimization of complex systems, pages 101–122. Springer, 2015.

[92]
Francesca Campolongo and Roger Braddock. The use of graph theory in the sensitivity analysis of the model output: a second order screening method. Reliability Engineering & System Safety, 64(1):1–12, 1999.

[93]
Roger A Cropp and Roger D Braddock. The new morris method: an efficient second-order screening method. Reliability Engineering & System Safety, 78(1):77–83, 2002.

[94]
Jon C Helton. Uncertainty and sensitivity analysis techniques for use in performance assessment for radioactive waste disposal. Reliability Engineering & System Safety, 42(2-3):327–367, 1993.

[95]
Gemma Manache and Charles S Melching. Identification of reliable regression-and correlation-based sensitivity measures for importance ranking of water-quality model parameters. Environmental Modelling & Software, 23(5):549–562, 2008.

[96]
F Pappenberger and Keith J Beven. Ignorance is bliss: or seven reasons not to use uncertainty analysis. Water Resources Research, 42(5):W05302, 2006.

[97]
Jerome H Friedman and Nicholas I Fisher. Bump hunting in high-dimensional data. Statistics and Computing, 9(2):123–143, 1999.

[98]
Leo Breiman, Jerome Friedman, Charles J Stone, and Richard A Olshen. Classification and regression trees. CRC press, 1984.

[99]
Yoav Freund, Robert Schapire, and Naoki Abe. A short introduction to boosting. Journal of the Japanese Society for Artificial Intelligence, 14(5):771–780, 1999.

[100]
Leo Breiman. Bagging predictors. Machine learning, 24(2):123–140, 1996.

[101]
George M Hornberger and Robert C Spear. Approach to the preliminary analysis of environmental systems. J. Environ. Mgmt., 12(1):7–18, 1981.

[102]
Robert J Lempert, David G Groves, Steven W Popper, and Steve C Bankes. A general, analytic method for generating robust strategies and narrative scenarios. Management science, 52(4):514–528, 2006.

[103]
David G Groves and Robert J Lempert. A new analytic method for finding policy-relevant scenarios. Global Environmental Change, 17(1):73–85, 2007.

[104]
Keith Beven and Andrew Binley. Glue: 20 years on. Hydrological processes, 28(24):5897–5918, 2014.

[105]
Roberta-Serena Blasone, Jasper A Vrugt, Henrik Madsen, Dan Rosbjerg, Bruce A Robinson, and George A Zyvoloski. Generalized likelihood uncertainty estimation (glue) using adaptive markov chain monte carlo sampling. Advances in Water Resources, 31(4):630–648, 2008.

[106]
SA Cryer and PL Havens. Regional sensitivity analysis using a fractional factorial method for the USDA model GLEAMS. Environmental Modelling & Software, 14(6):613–624, 1999.

[107]
Pengfei Wei, Zhenzhou Lu, and Xiukai Yuan. Monte carlo simulation for moment-independent sensitivity analysis. Reliability Engineering & System Safety, 110:60–67, 2013.

[108]
Peter Young. Data-based mechanistic modelling, generalised sensitivity and dominant mode analysis. Computer Physics Communications, 117(1-2):113–129, 1999.

[109]
Andrea Saltelli. Making best use of model evaluations to compute sensitivity indices. Computer physics communications, 145(2):280–297, 2002.

[110]
Andrea Saltelli. Sensitivity analysis for importance assessment. Risk analysis, 22(3):579–590, 2002.

[111]
Toshimitsu Homma and Andrea Saltelli. Importance measures in global sensitivity analysis of nonlinear models. Reliability Engineering & System Safety, 52(1):1–17, 1996.

[112]
Gregory J McRae, William R Goodin, and John H Seinfeld. Development of a second-generation mathematical model for urban air pollution—i. model formulation. Atmospheric Environment (1967), 16(4):679–696, 1982.

[113]
Andrea Saltelli and Ricardo Bolado. An alternative way to compute fourier amplitude sensitivity test (fast). Computational Statistics & Data Analysis, 26(4):445–460, 1998.

[114]
MA Vazquez-Cruz, R Guzman-Cruz, IL Lopez-Cruz, O Cornejo-Perez, I Torres-Pacheco, and RG Guevara-Gonzalez. Global sensitivity analysis by means of EFAST and Sobol' methods and calibration of reduced state-variable TOMGRO model using genetic algorithms. Computers and Electronics in Agriculture, 100:1–12, 2014.

[115]
Benjamin Auder and Bertrand Iooss. Global sensitivity analysis based on entropy. In Safety, reliability and risk analysis-Proceedings of the ESREL 2008 Conference, 2107–2115. 2008.

[116]
Farkhondeh Khorashadi Zadeh, Jiri Nossent, Fanny Sarrazin, Francesca Pianosi, Ann van Griensven, Thorsten Wagener, and Willy Bauwens. Comparison of variance-based and moment-independent global sensitivity analysis approaches by application to the SWAT model. Environmental Modelling & Software, 91:210–222, 2017.

[117]
Francesca Pianosi and Thorsten Wagener. A simple and efficient method for global sensitivity analysis based on cumulative distribution functions. Environmental Modelling & Software, 67:1–11, 2015.

[118]
Ronald Aylmer Fisher. Statistical methods for research workers. Oliver and Boyd, 1934.

[119]
Loïc Brevault, Mathieu Balesdent, Nicolas Bérend, and Rodolphe Le Riche. Comparison of different global sensitivity analysis methods for aerospace vehicle optimal design. In 10th World Congress on Structural and Multidisciplinary Optimization, WCSMO-10. 2013.

[120]
GEB Archer, Andrea Saltelli, and IM Sobol. Sensitivity measures, anova-like techniques and the use of bootstrap. Journal of Statistical Computation and Simulation, 58(2):99–120, 1997.

[121]
Art B Owen. Variance components and generalized Sobol' indices. SIAM/ASA Journal on Uncertainty Quantification, 1(1):19–41, 2013.

[122]
Emanuele Borgonovo. Measuring uncertainty importance: investigation and comparison of alternative approaches. Risk analysis, 26(5):1349–1361, 2006.

[123]
Emanuele Borgonovo. A new uncertainty importance measure. Reliability Engineering & System Safety, 92(6):771–784, 2007.

[124]
Elmar Plischke, Emanuele Borgonovo, and Curtis L Smith. Global sensitivity measures from given data. European Journal of Operational Research, 226(3):536–550, 2013.

[125]
Jiri Nossent, Pieter Elsen, and Willy Bauwens. Sobol’ sensitivity analysis of a complex environmental model. Environmental Modelling & Software, 26(12):1515–1525, 2011. URL: https://www.sciencedirect.com/science/article/pii/S1364815211001939, doi:https://doi.org/10.1016/j.envsoft.2011.08.010.

[126]
Jon Herman and Will Usher. Salib: an open-source python library for sensitivity analysis. Journal of Open Source Software, 2(9):97, 2017.

[127]
Hoshin V Gupta, Thorsten Wagener, and Yuqiong Liu. Reconciling theory with observations: elements of a diagnostic approach to model evaluation. Hydrological Processes: An International Journal, 22(18):3802–3813, 2008.

[128]
Keith Beven and Jim Freer. Equifinality, data assimilation, and uncertainty estimation in mechanistic modelling of complex environmental systems using the glue methodology. Journal of hydrology, 249(1-4):11–29, 2001.

[129]
Cameron McPhail, HR Maier, JH Kwakkel, M Giuliani, A Castelletti, and S Westra. Robustness metrics: how are they calculated, when should they be used and why do they give different results? Earth's Future, 6(2):169–191, 2018.

[130]
John D Sterman. System dynamics modeling: tools for learning in a complex world. California management review, 43(4):8–25, 2001.

[131]
John M Anderies, Jean-Denis Mathias, and Marco A Janssen. Knowledge infrastructure and safe operating spaces in social–ecological systems. Proceedings of the National Academy of Sciences, 116(12):5277–5284, 2019.

[132]
Rachata Muneepeerakul and John M Anderies. The emergence and resilience of self-organized governance in coupled infrastructure systems. Proceedings of the National Academy of Sciences, 117(9):4617–4622, 2020.

[133]
Antonia Hadjimichael, Patrick M Reed, and Julianne D Quinn. Navigating deeply uncertain tradeoffs in harvested predator-prey systems. Complexity, 2020.

[134]
Julianne D Quinn, Patrick M Reed, and Klaus Keller. Direct policy search for robust multi-objective management of deeply uncertain socio-ecological tipping points. Environmental modelling & software, 92:125–141, 2017.

[135]
S. R. Carpenter, D. Ludwig, and W. A. Brock. Management of Eutrophication for Lakes Subject to Potentially Irreversible Change. Ecological Applications, 9(3):751–771, August 1999. URL: http://onlinelibrary.wiley.com/doi/10.1890/1051-0761(1999)009[0751:MOEFLS]2.0.CO;2/abstract, doi:10.1890/1051-0761(1999)009[0751:MOEFLS]2.0.CO;2.

[136]
Julianne Quinn. julianneq/Lake_Problem_DPS. GitHub repository, December 2017. URL: https://github.com/julianneq/Lake_Problem_DPS (visited on 2021-06-14).

[137]
David Hadka. Project-Platypus/Rhodium. GitHub repository, 2017. URL: https://github.com/Project-Platypus/Rhodium (visited on 2021-06-14).

[138]
Stephen R. Carpenter, William A. Brock, Carl Folke, Egbert H. van Nes, and Marten Scheffer. Allowing variance may enlarge the safe operating space for exploited ecosystems. Proceedings of the National Academy of Sciences, 112(46):14384–14389, November 2015. URL: http://www.pnas.org/content/112/46/14384 (visited on 2017-08-18), doi:10.1073/pnas.1511804112.

[139]
Mustafa Hekimoğlu and Yaman Barlas. Sensitivity analysis for models with multiple behavior modes: a method based on behavior pattern measures. System Dynamics Review, 32(3-4):332–362, 2016.

[140]
Patrick Steinmann, Willem L Auping, and Jan H Kwakkel. Behavior-based scenario discovery using time series clustering. Technological Forecasting and Social Change, 156:120052, 2020.

[141]
Steven C Bankes, Robert J Lempert, and Steven W Popper. Computer-assisted reasoning. Computing in Science & Engineering, 3(2):71–77, 2001.

[142]
Robert J. Lempert, Steven W. Popper, and Steven C. Bankes. Shaping the Next One Hundred Years. RAND Corporation, 2003. URL: https://www.rand.org/pubs/monograph_reports/MR1626.html (visited on 2017-09-14).

[143]
Jonathan R Lamontagne, Patrick M Reed, Robert Link, Katherine V Calvin, Leon E Clarke, and James A Edmonds. Large ensemble analytic framework for consequence-driven discovery of climate change scenarios. Earth's Future, 6(3):488–504, 2018.

[144]
Brian C O’Neill, Elmar Kriegler, Keywan Riahi, Kristie L Ebi, Stephane Hallegatte, Timothy R Carter, Ritu Mathur, and Detlef P van Vuuren. A new scenario framework for climate change research: the concept of shared socioeconomic pathways. Climatic change, 122(3):387–400, 2014.

[145]
Warren E Walker, Marjolijn Haasnoot, and Jan H Kwakkel. Adapt or perish: a review of planning approaches for adaptation under deep uncertainty. Sustainability, 5(3):955–979, 2013.

[146]
Suraje Dessai, Mike Hulme, Robert Lempert, and Roger Pielke Jr. Climate prediction: a limit to adaptation. In Adapting to climate change: thresholds, values, governance, pages 64–78. Cambridge University Press, 2009.

[147]
Jonathan D Herman, Patrick M Reed, Harrison B Zeff, and Gregory W Characklis. How should robustness be defined for water systems planning under change? Journal of Water Resources Planning and Management, 141(10):04015012, 2015.

[148]
Robert J Lempert. Robust decision making (rdm). In Decision making under deep uncertainty, pages 23–51. Springer, Cham, 2019.

[149]
Jan H Kwakkel and Marjolijn Haasnoot. Supporting dmdu: a taxonomy of approaches and tools. In Decision Making under Deep Uncertainty, pages 355–374. Springer, Cham, 2019.

[150]
BC Trindade, PM Reed, and GW Characklis. Deeply uncertain pathways: integrated multi-city regional water supply infrastructure investment and portfolio management. Advances in Water Resources, 134:103442, 2019.

[151]
Julianne D Quinn, Patrick M Reed, Matteo Giuliani, Andrea Castelletti, Jared W Oyler, and Robert E Nicholas. Exploring how changing monsoonal dynamics and human pressures challenge multireservoir management for flood protection, hydropower production, and agricultural water supply. Water Resources Research, 54(7):4638–4662, 2018.

[152]
DF Gold, PM Reed, BC Trindade, and GW Characklis. Identifying actionable compromises: navigating multi-city robustness conflicts to discover cooperative safe operating spaces for regional water supply portfolios. Water Resources Research, 55(11):9024–9050, 2019.

[153]
JR Lamontagne, PM Reed, G Marangoni, K Keller, and GG Garner. Robust abatement pathways to tolerable climate futures require immediate global action. Nature Climate Change, 9(4):290–294, 2019.

[154]
Antonia Hadjimichael, Julianne Quinn, Erin Wilson, Patrick Reed, Leon Basdekas, David Yates, and Michelle Garrison. Defining Robustness, Vulnerabilities, and Consequential Scenarios for Diverse Stakeholder Interests in Institutionally Complex River Basins. Earth's Future, 8(7):e2020EF001503, 2020. URL: https://agupubs.onlinelibrary.wiley.com/doi/abs/10.1029/2020EF001503 (visited on 2020-07-13), doi:10.1029/2020EF001503.

[155]
Harris Drucker and Corinna Cortes. Boosting decision trees. Advances in neural information processing systems, pages 479–485, 1996.

[156]
Kevin P Murphy. Machine learning: a probabilistic perspective. MIT press, 2012.

[157]
Kelsey L. Ruckert, Gary Shaffer, David Pollard, Yawen Guan, Tony E. Wong, Chris E. Forest, and Klaus Keller. Assessing the Impact of Retreat Mechanisms in a Simple Antarctic Ice Sheet Model Using Bayesian Calibration. PLOS ONE, 12(1):e0170052, January 2017. doi:10.1371/journal.pone.0170052.

[158]
B. Efron and R. Tibshirani. Bootstrap Methods for Standard Errors, Confidence Intervals, and Other Measures of Statistical Accuracy. Statistical Science, 1(1):54–75, February 1986. doi:10.1214/ss/1177013815.

[159]
Ryan L. Sriver, Robert J. Lempert, Per Wikman-Svahn, and Klaus Keller. Characterizing uncertain sea-level rise projections to support investment decisions. PLOS ONE, 13(2):e0190641, February 2018. URL: http://journals.plos.org/plosone/article?id=10.1371/journal.pone.0190641 (visited on 2021-06-09), doi:10.1371/journal.pone.0190641.

[160]
Kelsey L. Ruckert, Yawen Guan, Alexander M. R. Bakker, Chris E. Forest, and Klaus Keller. The effects of time-varying observation errors on semi-empirical sea-level projections. Climatic Change, 140(3):349–360, February 2017. URL: https://doi.org/10.1007/s10584-016-1858-z (visited on 2021-06-09), doi:10.1007/s10584-016-1858-z.

[161]
Neil R Edwards, David Cameron, and Jonathan Rougier. Precalibrating an intermediate complexity climate model. Clim. Dyn., 37(7-8):1469–1482, 2011. URL: http://dx.doi.org/10.1007/s00382-010-0921-0, doi:10.1007/s00382-010-0921-0.

[162]
Alexis Boukouvalas, Pete Sykes, Dan Cornford, and Hugo Maruri-Aguilar. Bayesian Precalibration of a Large Stochastic Microsimulation Model. IEEE Transactions on Intelligent Transportation Systems, 15(3):1337–1347, June 2014. doi:10.1109/TITS.2014.2304394.

[163]
David Makowski, Daniel Wallach, and Marie Tremblay. Using a Bayesian approach to parameter estimation; comparison of the GLUE and MCMC methods. Agronomie, 22(2):191–203, 2002. Publisher: EDP Sciences.

[164]
Mahyar Shafii, Bryan Tolson, and Loren Shawn Matott. Uncertainty-based multi-criteria calibration of rainfall-runoff models: a comparative study. Stochastic Environmental Research and Risk Assessment, 28(6):1493–1510, August 2014. doi:10.1007/s00477-014-0855-x.

[165]
Keith Beven and Jim Freer. Equifinality, data assimilation, and uncertainty estimation in mechanistic modelling of complex environmental systems using the GLUE methodology. Journal of hydrology, 249(1-4):11–29, 2001. Publisher: Elsevier.

[166]
Jasper A. Vrugt and Keith J. Beven. Embracing equifinality with efficiency: Limits of Acceptability sampling using the DREAM(LOA) algorithm. Journal of Hydrology, 559:954–971, April 2018. doi:10.1016/j.jhydrol.2018.02.026.

[167]
Jery R. Stedinger, Richard M. Vogel, Seung Uk Lee, and Rebecca Batchelder. Appraisal of the generalized likelihood uncertainty estimation (GLUE) method. Water Resources Research, 2008. doi:10.1029/2008WR006822.

[168]
Christian Robert and George Casella. Monte Carlo Statistical Methods. Springer Science & Business Media, March 2013. ISBN 978-1-4757-3071-5.

[169]
Christian P. Robert. The Metropolis–Hastings Algorithm. In Wiley StatsRef: Statistics Reference Online, pages 1–15. American Cancer Society, 2015. URL: https://onlinelibrary.wiley.com/doi/abs/10.1002/9781118445112.stat07834 (visited on 2021-06-14), doi:10.1002/9781118445112.stat07834.

[170]
James M. Flegal, Murali Haran, and Galin L. Jones. Markov Chain Monte Carlo: Can We Trust the Third Significant Figure? Statistical Science, 23(2):250–260, May 2008. Publisher: Institute of Mathematical Statistics. URL: https://projecteuclid.org/journals/statistical-science/volume-23/issue-2/Markov-Chain-Monte-Carlo--Can-We-Trust-the-Third/10.1214/08-STS257.full (visited on 2021-06-14), doi:10.1214/08-STS257.

[171]
Carla Currin, Toby Mitchell, Max Morris, and Don Ylvisaker. Bayesian Prediction of Deterministic Functions, with Applications to the Design and Analysis of Computer Experiments. Journal of the American Statistical Association, 86(416):953–963, December 1991. URL: https://www.tandfonline.com/doi/abs/10.1080/01621459.1991.10475138 (visited on 2021-06-14), doi:10.1080/01621459.1991.10475138.

[172]
Jerome Sacks, William J. Welch, Toby J. Mitchell, and Henry P. Wynn. Design and Analysis of Computer Experiments. Statistical Science, 4(4):409–423, 1989. Publisher: Institute of Mathematical Statistics. URL: https://www.jstor.org/stable/2245858 (visited on 2021-06-14).

[173]
Roger G. Ghanem and Pol D. Spanos. Spectral Stochastic Finite‐Element Formulation for Reliability Analysis. Journal of Engineering Mechanics, 117(10):2351–2372, October 1991. Publisher: American Society of Civil Engineers. URL: https://ascelibrary.org/doi/abs/10.1061/%28ASCE%290733-9399%281991%29117%3A10%282351%29 (visited on 2021-06-14), doi:10.1061/(ASCE)0733-9399(1991)117:10(2351).

[174]
Dongbin Xiu and George Em Karniadakis. The Wiener–Askey Polynomial Chaos for Stochastic Differential Equations. SIAM Journal on Scientific Computing, 24(2):619–644, January 2002. Publisher: Society for Industrial and Applied Mathematics. URL: https://epubs.siam.org/doi/abs/10.1137/S1064827501387826 (visited on 2021-06-14), doi:10.1137/S1064827501387826.

[175]
Angelo Ciccazzo, Gianni Di Pillo, and Vittorio Latorre. A SVM Surrogate Model-Based Method for Parametric Yield Optimization. IEEE Transactions on Computer-Aided Design of Integrated Circuits and Systems, 35(7):1224–1228, July 2016. Conference Name: IEEE Transactions on Computer-Aided Design of Integrated Circuits and Systems. doi:10.1109/TCAD.2015.2501307.

[176]
W. Andrew Pruett and Robert L. Hester. The Creation of Surrogate Models for Fast Estimation of Complex Model Outcomes. PLOS ONE, 11(6):e0156574, June 2016. Publisher: Public Library of Science. URL: https://journals.plos.org/plosone/article?id=10.1371/journal.pone.0156574 (visited on 2021-06-14), doi:10.1371/journal.pone.0156574.

[177]
John Eason and Selen Cremaschi. Adaptive sequential sampling for surrogate model generation with artificial neural networks. Computers & Chemical Engineering, 68:220–232, September 2014. URL: https://www.sciencedirect.com/science/article/pii/S0098135414001719 (visited on 2021-06-14), doi:10.1016/j.compchemeng.2014.05.021.

[178]
Dirk Gorissen, Luciano De Tommasi, Karel Crombecq, and Tom Dhaene. Sequential modeling of a low noise amplifier with neural networks and active learning. Neural Computing and Applications, 18(5):485–494, June 2009. URL: https://doi.org/10.1007/s00521-008-0223-1 (visited on 2021-06-14), doi:10.1007/s00521-008-0223-1.

[179]
Jenný Brynjarsdóttir and Anthony O'Hagan. Learning about physical parameters: the importance of model discrepancy. Inverse Problems, 30(11):114007, October 2014. Publisher: IOP Publishing. URL: https://doi.org/10.1088/0266-5611/30/11/114007 (visited on 2021-06-14), doi:10.1088/0266-5611/30/11/114007.

[180]
Michael Betancourt. A conceptual introduction to Hamiltonian Monte Carlo. arXiv preprint arXiv:1701.02434, 2017.

[181]
Radford M. Neal. MCMC using Hamiltonian dynamics. Handbook of markov chain monte carlo, 2(11):2, 2011.

[182]
Matti Vihola. Robust adaptive Metropolis algorithm with coerced acceptance rate. Statistics and Computing, 22(5):997–1008, September 2012. URL: https://doi.org/10.1007/s11222-011-9269-5 (visited on 2021-06-14), doi:10.1007/s11222-011-9269-5.

[183]
Perry de Valpine, Daniel Turek, Christopher J. Paciorek, Clifford Anderson-Bergman, Duncan Temple Lang, and Rastislav Bodik. Programming With Models: Writing Statistical Algorithms for General Model Structures With NIMBLE. Journal of Computational and Graphical Statistics, 26(2):403–413, April 2017. URL: https://doi.org/10.1080/10618600.2016.1172487 (visited on 2021-06-14), doi:10.1080/10618600.2016.1172487.

[184]
NIMBLE Development Team. NIMBLE: MCMC, Particle Filtering, and Programmable Hierarchical Modeling. May 2021. URL: https://zenodo.org/record/4829693 (visited on 2021-06-14), doi:10.5281/zenodo.4829693.

[185]
Stan Development Team. Stan Modeling Language Users Guide and Reference Manual. 2021. URL: https://mc-stan.org/docs/2_27/stan-users-guide/index.html (visited on 2021-06-14).

[186]
John Salvatier, Thomas V. Wiecki, and Christopher Fonnesbeck. Probabilistic programming in Python using PyMC3. PeerJ Computer Science, 2:e55, April 2016. Publisher: PeerJ Inc. URL: https://peerj.com/articles/cs-55 (visited on 2021-06-14), doi:10.7717/peerj-cs.55.

[187]
Hong Ge, Kai Xu, and Zoubin Ghahramani. Turing: A Language for Flexible Probabilistic Inference. In International Conference on Artificial Intelligence and Statistics, 1682–1690. PMLR, March 2018. ISSN: 2640-3498. URL: http://proceedings.mlr.press/v84/ge18b.html (visited on 2021-06-14).

[188]
Andrew Gelman and Donald B. Rubin. Inference from Iterative Simulation Using Multiple Sequences. Statistical Science, 7(4):457–472, November 1992. Publisher: Institute of Mathematical Statistics. URL: https://projecteuclid.org/journals/statistical-science/volume-7/issue-4/Inference-from-Iterative-Simulation-Using-Multiple-Sequences/10.1214/ss/1177011136.full (visited on 2021-06-14), doi:10.1214/ss/1177011136.

[189]
Pierre Del Moral, Arnaud Doucet, and Ajay Jasra. Sequential Monte Carlo samplers. Journal of the Royal Statistical Society: Series B (Statistical Methodology), 68(3):411–436, 2006. URL: http://rss.onlinelibrary.wiley.com/doi/abs/10.1111/j.1467-9868.2006.00553.x (visited on 2021-06-14), doi:10.1111/j.1467-9868.2006.00553.x.

[190]
Arnaud Doucet, Simon Godsill, and Christophe Andrieu. On sequential Monte Carlo sampling methods for Bayesian filtering. Statistics and Computing, 10(3):197–208, July 2000. URL: https://doi.org/10.1023/A:1008935410038 (visited on 2021-06-14), doi:10.1023/A:1008935410038.

[191]
Jane Liu and Mike West. Combined Parameter and State Estimation in Simulation-Based Filtering. In Arnaud Doucet, Nando de Freitas, and Neil Gordon, editors, Sequential Monte Carlo Methods in Practice, Statistics for Engineering and Information Science, pages 197–223. Springer, New York, NY, 2001. URL: https://doi.org/10.1007/978-1-4757-3437-9_10 (visited on 2021-06-14), doi:10.1007/978-1-4757-3437-9_10.

[192]
Stefano Cabras, Maria Eugenia Castellanos Nueda, and Erlis Ruli. Approximate Bayesian Computation by Modelling Summary Statistics in a Quasi-likelihood Framework. Bayesian Analysis, 10(2):411–439, June 2015. Publisher: International Society for Bayesian Analysis. URL: https://projecteuclid.org/journals/bayesian-analysis/volume-10/issue-2/Approximate-Bayesian-Computation-by-Modelling-Summary-Statistics-in-a-Quasi/10.1214/14-BA921.full (visited on 2021-06-14), doi:10.1214/14-BA921.

[193]
Jarno Lintusaari, Michael U. Gutmann, Ritabrata Dutta, Samuel Kaski, and Jukka Corander. Fundamentals and Recent Developments in Approximate Bayesian Computation. Systematic Biology, 66(1):e66–e82, January 2017. URL: https://doi.org/10.1093/sysbio/syw077 (visited on 2021-06-14), doi:10.1093/sysbio/syw077.

[194]
Mikael Sunnåker, Alberto Giovanni Busetto, Elina Numminen, Jukka Corander, Matthieu Foll, and Christophe Dessimoz. Approximate Bayesian Computation. PLOS Computational Biology, 9(1):e1002803, January 2013. Publisher: Public Library of Science. URL: https://journals.plos.org/ploscompbiol/article?id=10.1371/journal.pcbi.1002803 (visited on 2021-06-14), doi:10.1371/journal.pcbi.1002803.

[195]
Edwin T. Jaynes. Probability theory: the logic of science. Washington University St. Louis, MO, 1996.

[196]
Andrew Gelman, Daniel Simpson, and Michael Betancourt. The Prior Can Often Only Be Understood in the Context of the Likelihood. Entropy, 19(10):555, October 2017. Number: 10 Publisher: Multidisciplinary Digital Publishing Institute. URL: https://www.mdpi.com/1099-4300/19/10/555 (visited on 2021-06-14), doi:10.3390/e19100555.

[197]
Christian Robert. The Bayesian choice: from decision-theoretic foundations to computational implementation. Springer Science & Business Media, 2007.

[198]
Andrew Gelman, Xiao-Li Meng, and Hal Stern. Posterior Predictive Assessment of Model Fitness via Realized Discrepancies. Statistica Sinica, 6(4):733–760, 1996. Publisher: Institute of Statistical Science, Academia Sinica. URL: https://www.jstor.org/stable/24306036 (visited on 2021-06-14).

[199]
Andrew Gelman, Aki Vehtari, Daniel Simpson, Charles C. Margossian, Bob Carpenter, Yuling Yao, Lauren Kennedy, Jonah Gabry, Paul-Christian Bürkner, and Martin Modrák. Bayesian Workflow. arXiv:2011.01808 [stat], November 2020. arXiv: 2011.01808. URL: http://arxiv.org/abs/2011.01808 (visited on 2021-06-14).

[200]
Andrew Gelman and Cosma Rohilla Shalizi. Philosophy and the practice of Bayesian statistics. British Journal of Mathematical and Statistical Psychology, 66(1):8–38, 2013. URL: https://bpspsychub.onlinelibrary.wiley.com/doi/abs/10.1111/j.2044-8317.2011.02037.x (visited on 2021-06-14), doi:10.1111/j.2044-8317.2011.02037.x.

[Diff entries for several dozen binary image files added under dev/docs/html/_images/ (chapter figures and notebook output PNGs) omitted.]

diff --git a/dev/docs/html/_sources/1_introduction.rst b/dev/docs/html/_sources/1_introduction.rst
new file mode 100644
index 0000000..8e6c7d7
--- /dev/null
+++ b/dev/docs/html/_sources/1_introduction.rst
@@ -0,0 +1,31 @@
+.. _introduction:
+
+************
+Introduction
+************
+
+This guidance text has been developed in support of the Integrated Multisector Multiscale Modeling (IM3) Science Focus Area's objective to formally integrate uncertainty into its research tasks. IM3 is focused on innovative modeling to explore how human and natural system landscapes in the United States co-evolve in response to short-term shocks and long-term influences. The project's challenging scope is to advance our ability to study the interactions between energy, water, land, and urban systems, at scales ranging from local (~1km) to the contiguous United States, while consistently addressing influences such as population change, technology change, heat waves, and drought. Uncertainty and careful model-driven scientific insights are central to the project's science objectives shown below.
+
+**IM3 Key MSD Science Objectives:**
+
+*Develop flexible, open-source, and integrated modeling capabilities that capture the structure, dynamic behavior, and emergent properties of the multiscale interactions within and between human and natural systems.*
+
+*Use these capabilities to study the evolution, vulnerability, and resilience of interacting human and natural systems and landscapes from local to continental scales, including their responses to the compounding effects of long-term influences and short-term shocks.*
+
+*Understand the implications of uncertainty in data, observations, models, and model coupling approaches for projections of human-natural system dynamics.*
+
+Addressing the objectives above poses a strong transdisciplinary challenge that depends on a diversity of models and, more specifically, a consistent framing for making model-based science inferences. The term transdisciplinary science as used here formally implies a deep integration of disciplines to aid our hypothesis-driven understanding of coupled human-natural systems--bridging differences in theory, hypothesis generation, modeling, and modes of inference :cite:p:`national2014convergence`. The IM3 MSD research foci and questions require a deep integration across disciplines, where new modes of analysis can emerge that rapidly synthesize and exploit advances for making decision-relevant insights that at minimum acknowledge uncertainty and more ideally promote a rigorous quantitative mapping of its effects on the generality of claimed scientific insights. More broadly, diverse scientific disciplines engaged in the science of coupled human-natural systems, ranging from natural sciences to engineering and economics, employ a diversity of numerical computer models to study and understand their underlying systems of focus. The utility of these computer models hinges on their ability to represent the underlying real systems with sufficient fidelity and enable the inference of novel insights. This is particularly challenging in the case of coupled human-natural systems where there exists a multitude of interdependent human and natural processes taking place that could potentially be represented. These processes usually translate into modeled representations that are highly complex, non-linear, and exhibit strong interactions and threshold behaviors :cite:p:`elsawah2020eight, haimes2018risk, helbing2013globally`. Model complexity and detail have also been increasing as a result of our improving understanding of these processes, the availability of data, and the rapid growth in computing power :cite:p:`saltelli2019so`. As model complexity grows, modelers need to specify far more information than before: additional model inputs and relationships as more processes are represented, higher resolution data as more observations are collected, and new coupling relationships and interactions as diverse models are used in combination to answer multisector questions (e.g., the land-water-energy nexus). Typically, not all of this information is well known, nor is the impact of these many uncertainties on model outputs well understood. It is especially difficult to distinguish the effects of individual and interacting sources of uncertainty when modeling coupled systems with multisector and multiscale dynamics :cite:p:`wirtz2017rocky`.
+
+Given the challenge and opportunity posed by the disciplinary diversity of IM3, we utilized an informal team-wide survey to understand how the various disciplines typically address uncertainty, emphasizing key literature examples and domain-specific reviews. The feedback received provided perspectives across diverse areas within the Earth sciences, different engineering fields, as well as economics. Although our synthesis of this survey information highlighted some commonality across areas (e.g., the frequent use of scenario-based modeling), we identified key differences in vocabulary, the frequency with which formal uncertainty analysis appears in the disciplinary literature, and technical approaches. The IM3 team's responses captured a very broad conceptual continuum of methodological traditions, ranging from deterministic (no uncertainty) modeling to the theoretical case of fully engaging in modeling sources of uncertainty. Overall, error-driven analyses that focus on replicating prior observed conditions were reported to be the most prevalent types of studies for all disciplines. It was generally less common for studies to strongly engage with analyzing uncertainty via more formal ensemble analyses and design of experiments, though some areas did show significantly higher levels of activity. Another notable finding from our survey was the apparent lack of focus on understanding how model coupling relationships shape uncertainty. Although these observations are limited to the scope of feedback attained in the team-wide IM3 survey responses and the bodies of literature reported by respondents, we believe they reflect challenges that are common across the MSD community.
+
+In the IM3 uncertainty-related research that has occurred since this survey, we have observed that differences in terminology and interpretation of terminology across modeling teams can be confounding. One of the goals of this eBook is to provide a common language for uncertainty analysis within IM3 and, hopefully, for the broader MSD community. While individual scientific disciplines would be expected to retain their own terminology, by providing explicit definitions of terms we can facilitate the translation of concepts across transdisciplinary science teams. To begin, we use the term Uncertainty Analysis (UA) as an umbrella phrase covering all methods in this eBook. Next, we distinguish the key terms of uncertainty quantification (UQ) and uncertainty characterization (UC). UQ refers to the formal focus on the full specification of likelihoods as well as the distributional forms necessary to infer the joint probabilistic response across all modeled factors of interest :cite:p:`cooke1991experts`. UC refers to exploratory modeling of alternative hypotheses to understand the co-evolutionary dynamics of influences and stressors, as well as path dependent changes in the form and function of modelled systems :cite:p:`moallemi2020exploratory, walker2003defining`. As discussed in later sections, the choice of UC or UQ depends on the specific goals of studies, the availability of data, the types of uncertainties (e.g., well-characterized or deep), and the complexity of underlying models as well as computational limits. Definitions of key uncertainty analysis terms used in this eBook appear below, and our Glossary (:numref:`glossary`) contains a complete list of terms.
+
+* **Exploratory modeling**: Use of large ensembles of uncertain conditions to discover decision-relevant combinations of uncertain factors
+* **Factor**: Any model component that can affect model outputs: inputs, resolution levels, coupling relationships, model relationships and parameters. In models with acceptable model fidelity these factors may represent elements of the real-world system under study.
+* **Sensitivity analysis**: Model evaluation to understand the factors and processes that most (or least) control a model's outputs
+
+  * **Local sensitivity analysis**: Varying uncertain factors around specific reference values
+  * **Global sensitivity analysis**: Varying uncertain factors throughout their entire feasible value space
+
+* **Uncertainty characterization**: Model evaluation under alternative factor hypotheses to explore their implications for model output uncertainty
+* **Uncertainty quantification**: Representation of model output uncertainty using probability distributions
+
+At present, there is no singular guide for confronting the computational and conceptual challenges of the multi-model, transdisciplinary workflows that characterize ambitious projects such as IM3 :cite:p:`saltelli2015climate`. The primary aim of this text is to begin to address this gap and provide guidance for facing these challenges. :numref:`2_diagnostic_modeling` provides an overview of diagnostic modeling and the different perspectives for how we should evaluate our models, :numref:`3_sensitivity_analysis_the_basics` summarizes basic methods and concepts for sensitivity analysis, and :numref:`4_sensitivity_analysis` delves into more technical applications of sensitivity analysis to support diagnostic model evaluation and exploratory modeling. Finally, :numref:`5_conclusion` provides some concluding remarks across the UC and UQ topics covered in this text. The appendices of this text include a glossary of the key concepts, an overview of UQ methods, and coding-based illustrative examples of key UC concepts discussed in earlier chapters.

diff --git a/dev/docs/html/_sources/2.1_overview_of_model_diagnostics.rst b/dev/docs/html/_sources/2.1_overview_of_model_diagnostics.rst
new file mode 100644
index 0000000..c2df478
--- /dev/null
+++ b/dev/docs/html/_sources/2.1_overview_of_model_diagnostics.rst
@@ -0,0 +1,15 @@
+Overview of model diagnostics
+#############################
+
+Model diagnostics provide a rich basis for hypothesis testing, model innovation, and improved inferences when classifying what is controlling highly consequential results (e.g., vulnerability or resilience in coupled human-natural systems). :numref:`Figure_2_1`, adapted from :cite:p:`saltelli2019so`, presents idealized illustrations of the relationship between UC and global sensitivity analysis for two coupled simulation models. The figure illustrates how UC can be used to sample and simulate uncertainties in various modeling decisions (e.g., data inputs, parameters, model structures, coupling relationships) to yield the empirical model output distribution(s) of interest. Monte Carlo frameworks allow us to sample and propagate (or integrate) the ensemble response of the model(s) of focus. The first step of any UC analysis is the specification of the initial input distributions as illustrated in :numref:`Figure_2_1`. The second step is to perform the Monte Carlo simulations.
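As a deliberately minimal sketch of these two steps (specify the input distributions, then run the Monte Carlo ensemble), the snippet below uses the open-source SALib Python library cited in this text's bibliography. The three uncertain factors, their uniform bounds, and SALib's built-in Ishigami test function (standing in for the coupled simulation model(s) of interest) are illustrative assumptions only, not a prescribed setup:

    import numpy as np
    from SALib.sample import saltelli
    from SALib.analyze import sobol
    from SALib.test_functions import Ishigami

    # Step 1: specify the uncertain factors and their (assumed) input distributions.
    # Each factor is treated here as uniform over the stated bounds.
    problem = {
        "num_vars": 3,
        "names": ["x1", "x2", "x3"],
        "bounds": [[-np.pi, np.pi]] * 3,
    }

    # Step 2: generate the Monte Carlo ensemble (Saltelli extension of Sobol' sampling)
    # and propagate it through the model; the Ishigami function stands in for the
    # coupled model(s) of focus.
    param_values = saltelli.sample(problem, 1024)
    Y = Ishigami.evaluate(param_values)

    # Decompose the resulting output variance into first-order (S1) and total-order (ST)
    # sensitivity indices for each uncertain factor.
    Si = sobol.analyze(problem, Y)
    print(Si["S1"], Si["ST"])

In practice the test function would be replaced by a call to the actual model for each ensemble member; the resulting first-order and total-order indices provide the kind of factor-level attribution of output uncertainty portrayed conceptually by the pie chart in :numref:`Figure_2_1`.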
The question can then be raised, which of the modeling assumptions in our Monte Carlo experiment are the most responsible for the resulting output uncertainty. We can answer this question using global sensitivity analysis as illustrated in :numref:`Figure_2_1`. Global sensitivity analysis can be defined as a formal Monte Carlo sampling and analysis of modeling choices (structures, parameters, inputs) to quantify their influence on direct model outputs (or output-informed metrics). UC experiments by themselves do not explain why a particular uncertain outcome is produced, but produce distributions of model outcomes, as portrayed by the yellow curve. The pie chart shown in :numref:`Figure_2_1` is a conceptual representation of the results of using a global sensitivity analysis to identify those factors that are most dominantly influencing results, either individually or interactively :cite:`saltelli_global_2008`. + +.. _Figure_2_1: +.. figure:: _static/figure2_1_idealized_uc.png + :alt: Figure 2.1 + :width: 700px + :figclass: margin-caption + :align: center + + Idealized uncertainty characterization and global sensitivity analysis for two coupled simulation models. Uncertainty coming from various sources (e.g., inputs, model structures, coupling relationships) is propagated through the coupled model(s) to generate empirical distributions of outputs of interest (uncertainty characterization). This model output uncertainty can be decomposed to its origins, by means of sensitivity analysis. Figure adapted from :cite:t:`saltelli2019so`. + +UC and global sensitivity analysis are not independent modeling analyses. As illustrated here, any global sensitivity analysis requires an initial UC hypothesis in the form of statistical assumptions and representations for the modeling choices of focus (structural, parametric, and data inputs). Information from these two model diagnostic tools can then be used to inform data needs for future model runs, experiments to reduce the uncertainty present, or the simplification or enhancement of the model where necessary. Together UC and global sensitivity analysis provide a foundation for diagnostic exploratory modeling that has a consistent focus on the assumptions, structural model forms, alternative parameterizations, and input data sets that are used to characterize the behavioral space of one or more models. diff --git a/dev/docs/html/_sources/2.2_perspectives_on_diagnostic_model_evaluation.rst b/dev/docs/html/_sources/2.2_perspectives_on_diagnostic_model_evaluation.rst new file mode 100644 index 0000000..9af331d --- /dev/null +++ b/dev/docs/html/_sources/2.2_perspectives_on_diagnostic_model_evaluation.rst @@ -0,0 +1,18 @@ +.. _perspectives: + +Perspectives on diagnostic model evaluation +########################################### + +When we judge or diagnose models, the terms "verification" and "validation" are commonly used. However, their appropriateness in the context of numerical models representing complex coupled human-natural systems is questionable :cite:`beven2002towards, oreskes_verification_1994`. The core issue relates to the fact that these systems are often not fully known or perfectly implemented when modeled. Rather, they are defined within specific system framings and boundary conditions in an evolving learning process with the goal of making continual progress towards attaining higher levels of fidelity in capturing behaviors or properties of interest. Evaluating the fidelity of a model's performance can be highly challenging. 
For example, the observations used to evaluate the fidelity of parameterized processes are often measured at a finer resolution than what is represented in the model, creating the challenge of how to manage their relative scales when performing evaluation. In other cases, numerical models may neglect or simplify system processes because sufficient data is not available or the physical mechanisms are not fully known. If sufficient agreement between prediction and observation is not achieved, it is challenging to know whether these types of modeling choices are the cause, or if other issues, such as deficiencies in the input parameters and/or other modeling assumptions, are the true cause of errors. Even if there is high agreement between prediction and observation, the model cannot necessarily be considered validated, as it is always possible that the right values were produced for the wrong reasons. For example, low error can stem from a situation where different errors in underlying assumptions or parameters cancel each other out (“compensatory errors”). Furthermore, coupled human-natural system models are often subject to “equifinality”, a situation where multiple parameterized formulations can produce similar outputs or equally acceptable representations of the observed data. There is therefore no uniquely “true” or validated model, and the common practice of selecting “the best” deterministic calibration set is more of an assumption than a finding :cite:`beven1993prophecy, beven_future_1992`. The situation becomes even more tenuous when observational data is too limited in scope and/or quality to distinguish between model representations or their performance differences. + +These limitations on model verification undermine any purely positivist treatment of model validity: that a model should correctly and precisely represent reality to be valid. Under this perspective, closely related to empiricism, statistical tests should be used to compare the model’s output with observations, and only through empirical verification can a model or theory be deemed credible. A criticism of this viewpoint (besides the aforementioned challenges for model verification) is that it reduces the justification of a model to the single criterion of predictive ability and accuracy :cite:`barlas_philosophical_1990`. Authors have argued that this ignores the explanatory power held in models and other procedures, which can also advance scientific knowledge :cite:`toulmin_form_1977`. These views gave rise to relativist perspectives of science, which instead place more value on model utility in terms of fitness for a specific purpose or inquiry, rather than representational accuracy and predictive ability :cite:`kleindorfer_validation_1998`. This viewpoint appears to be most prevalent among practitioners seeking decision-relevant insights (i.e., models that inspire new views rather than predict future conditions). The relativist perspective argues for the use of models as heuristics that can enhance our understanding and conceptions of system behaviors or possibilities :cite:`eker2018practice`. In contrast, the natural sciences favor a positivist perspective, emphasizing similarity between simulation and observation even in application contexts where it is clear that projections are being made for conditions that have never been observed and the system of focus will have evolved structurally beyond the model representation being employed (e.g., decadal to centennial evolution of human-natural systems). 
+ +These differences in prevalent perspectives are mirrored in how model validation is defined by the two camps: From the relativist perspective, validation is seen as a process of incremental “confidence building” in a model as a mechanism for insight :cite:`barlas_formal_1996`, whereas in natural sciences validation is framed as a way to classify a model as having an acceptable representation of physical reality :cite:`oreskes_verification_1994`. Even though the relativist viewpoint does not dismiss the importance of representational accuracy, it does place it within a larger process of establishing confidence through a variety of tools. These tools, not necessarily quantitative, include communicating information between practitioners and modelers, interpreting a multitude of model outputs, and contrasting preferences and viewpoints. + +On the technical side of the argument, differing views on the methodology of model validation appear as early as in the 1960’s. :cite:t:`naylor_verification_1967` argue that model validation should not be limited to a single metric or test of performance (e.g., a single error metric), but should rather be extended to multiple tests that reflect different aspects of a model’s structure and behavior. This and similar arguments are made in literature to this day :cite:`beven2018hypothesis, gupta2012towards, gupta_reconciling_2008, kumar_typology_2011, nearing_does_2020` and are primarily founded on two premises. First, that even though modelers widely recognize that their models are abstractions of the truth, they still make truth claims based on traditional performance metrics that measure the divergence of their model from observation :cite:`nearing_does_2020`. Second, that the natural systems mimicked by the models contain many processes that exhibit significant heterogeneity at various temporal and spatial scales. This heterogeneity is lost when a single performance measure is used, as a result of the inherent loss of process information occurring when transitioning from a highly dimensional and interactive system to the dimension of a single metric :cite:`beven2002towards`. These arguments are further elaborated in :numref:`4_sensitivity_analysis`. + +Multiple authors have proposed that the traditional reliance on single measures of model performance should be replaced by the evaluation of several model signatures (characteristics) to identify model structural errors and achieve a sufficient assessment of model performance :cite:`gupta_toward_1998, gupta_reconciling_2008, pianosi_understanding_2017, rouge_coordination_2019`. There is however a point of departure here, especially when models are used to produce inferences that can inform decisions. When agencies and practitioners use models of their systems for public decisions, those models have already met sufficient conditions for credibility (e.g., acceptable representational fidelity), but may face broader tests on their salience and legitimacy in informing negotiated decisions :cite:`cash_knowledge_2003, eker2018practice, white_credibility_2010`. This presents a new challenge to model validation, that of selecting decision-relevant performance metrics, reflective of the system’s stakeholders' viewpoints, so that the most consequential uncertainties are identified and addressed :cite:`saltelli_when_2014`. 
For complex multisector models at the intersection of climatic, hydrologic, agricultural, energy, or other processes, the output space is made up of a multitude of states and variables, with very different levels of salience to the system's stakeholders and to their goals being achieved :cite:`wagener_what_2019`. This is further complicated when such systems are also institutionally and dynamically complex. As a result, a broader set of qualitative and quantitative performance metrics is necessary to evaluate models of such complex systems, one that embraces the plurality of value systems, agencies and perspectives present. For IM3, even though the goal is to develop better projections of future vulnerability and resilience in co-evolving human-natural systems and not to provide decision support per se, it is critical for our multisector, multiscale model evaluation processes to represent stakeholders’ adaptive decision processes credibly. + +As a final point, when a model is used in a projection mode, its results are also subject to additional uncertainty, as there is no guarantee that the model’s functionality and predictive ability will stay the same as the baseline, where the verification and validation tests were conducted. This challenge requires an additional expansion of the scope of model evaluation: a broader set of uncertain conditions needs to be explored, spanning beyond historical observation and exploring a wide range of unprecedented conditions. This perspective on modeling, termed exploratory :cite:`bankes_exploratory_1993`, views models as computational experiments that can be used to explore vast ensembles of potential scenarios to identify those with consequential effects. Exploratory modeling literature explicitly orients experiments toward stakeholder consequences and decision-relevant inferences and shifts the focus from predicting future conditions to *discovering* which conditions lead to undesirable or desirable consequences. + +This evolution in modeling perspectives can be mirrored by the IM3 family of models in a progression from evaluating models relative to observed history to advanced formalized analyses to make inferences on multisector, multiscale vulnerabilities and resilience. Exploratory modeling approaches can help fashion experiments with large numbers of alternative hypotheses on the co-evolutionary dynamics of influences, stressors, as well as path-dependent changes in the form and function of human-natural systems :cite:`weaver_improving_2013`. The aim of this text is to therefore guide the reader through the use of sensitivity analysis (SA) methods across these perspectives on diagnostic and exploratory modeling. diff --git a/dev/docs/html/_sources/2_diagnostic_modeling_overview_and_perspectives.rst b/dev/docs/html/_sources/2_diagnostic_modeling_overview_and_perspectives.rst new file mode 100644 index 0000000..845c971 --- /dev/null +++ b/dev/docs/html/_sources/2_diagnostic_modeling_overview_and_perspectives.rst @@ -0,0 +1,25 @@ +.. _2_diagnostic_modeling: + +********************************************* +Diagnostic Modeling Overview and Perspectives +********************************************* + +This text prescribes a formal model diagnostic approach that is a deliberative and iterative combination of state-of-the-art UC and global sensitivity analysis techniques that progresses from observed history-based fidelity evaluations to forward looking resilience and vulnerability inferences :cite:`gupta_reconciling_2008, hadjimichael_advancing_2020`. 
+ +.. include:: 2.1_overview_of_model_diagnostics.rst + +.. include:: 2.2_perspectives_on_diagnostic_model_evaluation.rst + +.. note:: + The following articles are suggested as fundamental reading for the information presented in this section: + + * Naomi Oreskes, Kristin Shrader–Frechette, and Kenneth Belitz. Verification, Validation, and Confirmation of Numerical Models in the Earth Sciences. *Science*, 263 (5147): 641-646, February 1994. URL: https://science.sciencemag.org/content/263/5147/641. DOI: https://doi.org/10.1126/science.263.5147.641. + * Keith Beven. Towards a coherent philosophy for modelling the environment. *Proceedings of the Royal Society of London*. Series A: mathematical, physical and engineering sciences, 458 (2026): 2465-2484, 2002. + * Eker, S., Rovenskaya, E., Obersteiner, M., Langan, S., 2018. Practice and perspectives in the validation of resource management models. *Nature Communications* 9, 1–10. https://doi.org/10.1038/s41467-018-07811-9 + + The following articles can be used as supplemental reading: + + * Canham, C.D., Cole, J.J., Lauenroth, W.K. (Eds.), 2003. Models in Ecosystem Science. Princeton University Press. https://doi.org/10.2307/j.ctv1dwq0tq + + + diff --git a/dev/docs/html/_sources/3.1_global_versus_local_sensitivity.rst b/dev/docs/html/_sources/3.1_global_versus_local_sensitivity.rst new file mode 100644 index 0000000..f6704ce --- /dev/null +++ b/dev/docs/html/_sources/3.1_global_versus_local_sensitivity.rst @@ -0,0 +1,17 @@ +.. _global_vs_local: + +Global Versus Local Sensitivity +############################### + +Out of the several definitions for sensitivity analysis presented in the literature, the most widely used has been proposed by :cite:t:`saltelli2004sensitivity` as “the study of how uncertainty in the output of a model (numerical or otherwise) can be apportioned to different sources of uncertainty in the model input”. In other words, sensitivity analysis explores the relationship between the model’s :math:`N` input variables, :math:`x=[x_1,x_2,...,x_N]`, and :math:`M` output variables, :math:`y=[y_1,y_2,...,y_M]` with :math:`y=g(x)`, where :math:`g` is the model that maps the model inputs to the outputs :cite:p:`borgonovo2016sensitivity`. + +Historically, there have been two broad categories of sensitivity analysis techniques: local and global. Local sensitivity analysis is performed by varying model parameters around specific reference values, with the goal of exploring how small input perturbations influence model performance. Due to its ease-of-use and limited computational demands, this approach has been widely used in literature, but has important limitations :cite:p:`rakovec2014distributed,saltelli2010avoid`. If the model is not linear, the results of local sensitivity analysis can be heavily biased, as they are strongly influenced by independence assumptions and a limited exploration of model inputs (e.g., :cite:t:`tang2007comparing`). If the model’s factors interact, local sensitivity analysis will underestimate their importance, as it does not account for those effects (e.g., :cite:p:`hamm2006variance`). In general, as local sensitivity analysis only partially and locally explores a model's parametric space, it is not considered a valid approach for nonlinear models :cite:p:`saltelli2019`. This is illustrated in :numref:`Figure_3_1` (a-b), presenting contour plots of a model response (:math:`y`) with an additive linear model (a) and with a nonlinear model (b). 
In a linear model without interactions between the input terms :math:`x_1` and :math:`x_2`, local sensitivity analysis (assuming deviations from some reference values) can produce appropriate sensitivity indices (:numref:`Figure_3_1` (a)). If, however, factors :math:`x_1` and :math:`x_2` interact, the local and partial consideration of the space cannot properly account for each factor’s effects on the model response (:numref:`Figure_3_1` (b)), as it is only informative at the reference value where it is applied. In contrast, a global sensitivity analysis varies uncertain factors throughout their entire feasible value space (:numref:`Figure_3_1` (c)). This approach reveals the global effects of each parameter on the model output, including any interactive effects. For models that cannot be proven linear, global sensitivity analysis is preferred, and this text primarily discusses global sensitivity analysis methods. In the text that follows, whenever we use the term sensitivity analysis we are referring to its global application. + +.. _Figure_3_1: +.. figure:: _static/figure3_1_global_versus_local.png + :alt: Figure 3.1 + :width: 700px + :figclass: margin-caption + :align: center + + Treatment of a two-dimensional space of variability by local (panels a-b) and global (panel c) sensitivity analyses. Panels depict contour plots with the value of a model response (:math:`y`) changing with changes in the values of input terms :math:`x_1` and :math:`x_2`. Local sensitivity analysis is only an appropriate approach to sensitivity in the case of linear models without interactions between terms, for example in panel (a), where :math:`y=3x_1+5x_2`. In the case of more complex models, for example in panels (b-c), where :math:`y=\frac{1}{e^{x^2_1+x^2_2}} + \frac{50}{e^{(0.1x_1)^2+(0.1x_2)^3}}`, local sensitivity analysis will miscalculate sensitivity indices, as the assessed changes in the value of :math:`y` depend on the base values chosen for :math:`x_1` and :math:`x_2` (panel (b)). In these cases, global sensitivity methods should be used instead (panel (c)). The points in panel (c) are generated using a uniform random sample of :math:`n=50`, but many other methods are available. diff --git a/dev/docs/html/_sources/3.2_why_perform_sensitivity_analysis.rst b/dev/docs/html/_sources/3.2_why_perform_sensitivity_analysis.rst new file mode 100644 index 0000000..7b7ceea --- /dev/null +++ b/dev/docs/html/_sources/3.2_why_perform_sensitivity_analysis.rst @@ -0,0 +1,33 @@ +.. _why_SA: + +Why Perform Sensitivity Analysis +################################ + +It is important to understand the many ways in which a sensitivity analysis might be of use to your modeling effort. Most commonly, one might be motivated to perform sensitivity analysis for the following reasons: + +*Model evaluation*: Sensitivity analysis can be used to gauge model inferences when assumptions about the structure of the model or its parameterization are dubious or have changed. For instance, consider a numerical model that uses a set of calibrated parameter values to produce outputs, which we then use to inform decisions about the real-world system represented. One might like to know if small changes in these parameter values significantly change this model’s output and the decisions it informs or if, instead, our parameter inferences yield stable model behavior regardless of the uncertainty present in the specific parameterized processes or properties.
This can either discredit or lend credence to the model at hand, as well as any inferences drawn that are founded on its accurate representation of the system. Sensitivity analysis can identify which uncertain model factors cause this undesirable model behavior. + +*Model simplification*: Sensitivity analysis can also be used to identify factors or components of the model that appear to have limited effects on direct outputs or metrics of interest. Consider a model that has been developed in an organization for the purposes of a specific research question and is later used in the context of a different application. Some processes represented in significant detail might no longer be of the same importance while consuming significant data or computational resources, as different outputs might be pertinent to the new application. Sensitivity analysis can be used to identify unimportant model components and simplify them to nominal values and reduced model forms. Model complexity and computational costs can therefore be reduced. + +*Model refinement*: Alternatively, sensitivity analysis can reveal the factors or processes that are highly influential to the outputs or metrics of interest, by assessing their relative importance. In the context of model evaluation, this can inform which model components warrant additional investigation or measurement so the uncertainty surrounding them and the resulting model outputs or metrics of interest can be reduced. + +*Exploratory modeling*: When sufficient credence has been established in the model, sensitivity analysis can be applied to a host of other inquiries. Inferences about the factors and processes that most (or least) control a model’s outputs of interest can be extrapolated to the real system they represent and be used in a heuristic manner to inform model-based inferences. On this foundation, a model paired with the advanced techniques presented in this text can be used to “discover” decision relevant and highly consequential outcomes (i.e., scenario discovery, discussed in more detail in :numref:`consequential_scenarios` :cite:p:`bankes_exploratory_1993, bryant2010thinking`). + +The nature and context of the model shapes the specific objectives of applying a sensitivity analysis, as well as methods and tools most appropriate and defensible for each application setting :cite:`saltelli2004sensitivity, saltelli2002relative, wagener_what_2019`. The three most common sensitivity analysis modes (*Factor Prioritization*, *Factor Fixing*, and *Factor Mapping*) are presented below, but the reader should be aware that other uses have been proposed in the literature (e.g., :cite:`anderson2014uncertainty,borgonovo2010sensitivity`). + +*Factor prioritization*: This sensitivity analysis application mode (also referred to as *factor ranking*) refers to when one would like to identify the uncertain factors that have the greatest impact on the variability of the output, and which, when fixed to their true value (i.e., if there were no uncertainty regarding their value), would lead to the greatest reduction in output variability :cite:`saltelli2008global`. Information from this type of analysis can be crucial to model improvement as these factors can become the focus of future measurement campaigns or numerical experiments so that uncertainty in the model output can be reduced. The impact of each uncertain input on the variance of the model output is often used as the criterion for factor prioritization. 
:numref:`Figure_3_2` (a) shows the effects of three uncertain variables (:math:`X_1`, :math:`X_2`, and :math:`X_3`) on the variance of output :math:`Y`. :math:`V(E(Y|X_i))` indicates the variance in :math:`Y` if factor :math:`X_i` is left to vary freely while all other factors remain fixed to nominal values. In this case, factor :math:`X_2` makes the largest contribution to the variability of output :math:`Y` and it should therefore be prioritized. In the context of risk analysis, factor prioritization can be used to reduce output variance to below a given tolerable threshold (also known as variance cutting). + +*Factor fixing*: This mode of sensitivity analysis (also referred to as *factor screening*) aims to identify the model components that have a negligible effect or make no significant contributions to the variability of the outputs or metrics of interest (usually referred to as non-influential :cite:`saltelli2008global`). In the stylized example of :numref:`Figure_3_2` (a), :math:`X_1` makes the smallest contribution to the variability of output :math:`Y` suggesting that the uncertainty in its value could be negligible and the factor itself fixed in subsequent model executions. Eliminating these factors or processes in the model or fixing them to a nominal value can help reduce model complexity as well as the unnecessary computational burden of subsequent model runs, results processing, or other sensitivity analyses (the fewer uncertain factors considered, the fewer runs are necessary to illuminate their effects on the output). Significance of the outcome can be gauged in a variety of manners, depending on the application. For instance, if applying a variance-based method, a minimum threshold value of contribution to the variance could be considered as a significance ‘cutoff’, and factors with indices below that value can be considered non-influential. Conclusions about factor fixing should be made carefully, considering all of the effects a factor has, individually and in interaction with other factors (explained in more detail in the :numref:`variance-based_methods`). + +*Factor mapping*: Finally, factor mapping can be used to pinpoint which values of uncertain factors lead to model outputs within a given range of the output space :cite:`saltelli2008global`. In the context of model diagnostics, it is possible that the model’s output changes in ways considered impossible based on the represented processes, or other observed evidence. In this situation, factor mapping can be used to identify which uncertain model factors cause this undesirable model behavior by ‘filtering’ model runs that are considered ‘non-behavioral’ :cite:`edwards2011precalibrating,pianosi2016sensitivity,spear1980eutrophication`. In :numref:`Figure_3_2` (b), region :math:`B` of the output space :math:`Y` denotes the set of behavioral model outcomes and region :math:`\bar{B}` denotes the set of non-behavioral outcomes, resulting from the entirety of input space :math:`X`. Factor mapping refers to the process of tracing which factor values of input space :math:`X` produce the behavioral model outcomes in the output space. + +.. _Figure_3_2: +.. figure:: _static/figure3_2_factor_mapping.png + :alt: Figure 3.2 + :width: 500px + :figclass: margin-caption + :align: center + + Factor prioritization, factor fixing and factor mapping settings of sensitivity analysis. + +The language used above reflects a use of sensitivity analysis for model fidelity evaluation and refinement. 
However, as previously mentioned, when a model has been established as a sufficiently accurate representation of the system, sensitivity analysis can produce additional inferences (i.e., exploratory modeling and scenario discovery). For instance, under the factor mapping use, the analyst can now focus on undesirable system states and discover which factors are most responsible for them: for example, “population growth of above 25% would be responsible for unacceptably high energy demands”. Factor prioritization and factor fixing can be used to make equivalent inferences, such as “growing populations and increasing temperatures are the leading factors for changing energy demands” (factor prioritization) or “changing dietary needs are inconsequential to increasing energy demands for this region” (a factor that can be fixed in subsequent model runs). All these inferences hinge on the assumption that the real system’s stakeholders consider the model states faithful enough representations of system states. As elaborated in :numref:`perspectives`, this view on sensitivity analysis is founded on a relativist perspective on modeling, which tends to place more value on model usefulness rather than strict accuracy of representation in terms of error. As such, sensitivity analysis performed with decision-making relevance in mind will focus on model outputs or metrics that are consequential and decision-relevant (e.g., energy demand in the examples above). \ No newline at end of file diff --git a/dev/docs/html/_sources/3.3.1_one_at_a_time_oat.rst b/dev/docs/html/_sources/3.3.1_one_at_a_time_oat.rst new file mode 100644 index 0000000..79810d0 --- /dev/null +++ b/dev/docs/html/_sources/3.3.1_one_at_a_time_oat.rst @@ -0,0 +1,4 @@ +One-At-a-Time (OAT) +******************* + +In this approach, only one model factor is changed at a time while all others are kept fixed across each iteration in a sampling sequence. The OAT method assumes that model factors of focus are linearly independent (i.e., there are no interactions) and can analyze how factors individually influence model outputs or metrics of interest. While popular given its ease of implementation, OAT is ultimately limited in its exploration of a model’s sensitivities :cite:p:`saltelli2008global`. It is primarily used with local sensitivity techniques and is subject to similar criticisms: applying this sampling scheme to a system with nonlinear and interactive processes will miss important information on the effect uncertain factors have on the model. OAT sampling can be repeated in a more sophisticated manner across different locations of the parameter space to overcome some of these challenges, but doing so increases computational costs and negates the main reasons for selecting OAT in the first place. Given these limitations, OAT methods could be used as preliminary, low-cost analyses of the factors' individual effects, but should ultimately be complemented with more sophisticated methods. diff --git a/dev/docs/html/_sources/3.3.2_full_fractional_factorial_sampling.rst b/dev/docs/html/_sources/3.3.2_full_fractional_factorial_sampling.rst new file mode 100644 index 0000000..d7a5a45 --- /dev/null +++ b/dev/docs/html/_sources/3.3.2_full_fractional_factorial_sampling.rst @@ -0,0 +1,15 @@ +Full and Fractional Factorial Sampling +************************************** + +In full factorial sampling each factor is treated as being discrete by considering two or more levels (or intervals) of its values.
The sampling process then generates samples at every possible combination of factor levels. This scheme produces a more comprehensive sampling of the factors’ variability space, as it accounts for all candidate combinations of factor levels (:numref:`Figure_3_3` (a)). If the number of levels is the same across all factors, the number of generated samples is given by :math:`n^k`, where :math:`n` is the number of levels and :math:`k` is the number of factors. For example, :numref:`Figure_3_3` (a) presents a full factorial sampling of three uncertain factors :math:`(x_1,` :math:`x_2,` and :math:`x_3)`, each considered as having four discrete levels. The total number of samples necessary for such an experiment is :math:`4^3=64`. As the number of factors increases, the number of simulations necessary will also grow exponentially, making full factorial sampling computationally burdensome (:numref:`Figure_3_3` (b)). As a result, it is common in the literature to apply full factorial sampling at only two levels per factor, typically the two extremes :cite:`montgomery2017design`. This significantly reduces computational burden but is only considered appropriate in cases where factors can indeed only assume two discrete values (e.g., when testing the effects of epistemic uncertainty and comparing between model structure A and model structure B). In the case of physical parameters on continuous distributions (e.g., when considering the effects of measurement uncertainty in a temperature sensor), discretizing the range of a factor to only extreme levels can bias its estimated importance. + +Fractional factorial sampling is a widely used alternative to full factorial sampling that allows the analyst to significantly reduce the number of simulations by focusing on the main effects of a factor and seeking to avoid model runs that yield redundant response information :cite:p:`saltelli2008global`. In other words, if one can reasonably assume that higher-order interactions are negligible, information about the most significant effects and lower-order interactions (e.g., effects from pairs of factors) can be obtained using a fraction of the full factorial design. Traditionally, fractional factorial design has also been limited to two levels :cite:p:`montgomery2017design`, referred to as :math:`2^{k-p}` Fractional Factorial designs :cite:p:`box19612`. Recently, Generalized Fractional Factorial designs have also been proposed that allow for the structured generation of samples at more than two levels per factor :cite:p:`surowiec2017generalized`. Consider a case where the modeling team dealing with the problem in :numref:`Figure_3_3` (a) cannot afford to perform 64 simulations of their model. They can afford 32 runs for their experiment and instead decide to fractionally sample the variability space of their factors. A potential design of such a sampling strategy is presented in :numref:`Figure_3_3` (c). + +.. _Figure_3_3: +.. figure:: _static/figure3_3_alternative_designs.png + :alt: Figure 3.3 + :width: 500px + :figclass: margin-caption + :align: center + + Alternative designs of experiments and their computational costs for three uncertain factors :math:`(x_1,` :math:`x_2,` and :math:`x_3)`. 
(a) Full factorial design sampling of three factors at four levels, at a total of 64 samples; (b) exponential growth of necessary number of samples when applying full factorial design at four levels; (c) fractional factorial design of three factors at four levels, at a total of 32 samples; and (d) Latin Hypercube sample of three factors with uniform distributions, at a total of 32 samples. diff --git a/dev/docs/html/_sources/3.3.3_latin_hypercube_sampling.rst b/dev/docs/html/_sources/3.3.3_latin_hypercube_sampling.rst new file mode 100644 index 0000000..c013abb --- /dev/null +++ b/dev/docs/html/_sources/3.3.3_latin_hypercube_sampling.rst @@ -0,0 +1,6 @@ +Latin Hypercube Sampling (LHS) +****************************** + +Latin hypercube sampling (LHS) :cite:p:`mckay1979` is one of the most common methods in space-filling experimental designs. With this sampling technique, for :math:`N` uncertain factors, an :math:`N`-dimensional hypercube is generated, with each factor divided into an equal number of levels depending on the total number of samples to be generated. Equal numbers of samples are then randomly generated at each level, across all factors. In this manner, Latin hypercube design guarantees sampling from every level of the variability space and without any overlaps. When the number of samples generated is much larger than the number of uncertain factors, LHS can be very effective in examining the effects of each factor :cite:p:`saltelli2008global`. LHS is an attractive technique because it guarantees a diverse coverage of the space, through the use of subintervals, without being constrained to discrete levels for each factor; compare :numref:`Figure_3_3` (c) with :numref:`Figure_3_3` (d) for the same number of samples. + +LHS is less effective when the number of samples is not much larger than the number of uncertain factors, as the effects of each factor cannot then be appropriately distinguished. The samples between factors can also be highly correlated, biasing any subsequent sensitivity analysis results. To address this, the sampling scheme can be modified to control for the correlation in parameters while maximizing the information derived. An example of such a modification is through the use of orthogonal arrays :cite:p:`tang1993orthogonal`. diff --git a/dev/docs/html/_sources/3.3.4_low_discrepancy_sequences.rst b/dev/docs/html/_sources/3.3.4_low_discrepancy_sequences.rst new file mode 100644 index 0000000..d5fbc0a --- /dev/null +++ b/dev/docs/html/_sources/3.3.4_low_discrepancy_sequences.rst @@ -0,0 +1,8 @@ +Low-Discrepancy Sequences +************************* + +Low-discrepancy sequences provide another sampling technique that employs a pseudo-random generator for Monte Carlo sampling :cite:p:`dalal2008low, zaremba1968mathematical`. These quasi-Monte Carlo methods eliminate 'lumpiness' across samples (i.e., the presence of gaps and clusters) by minimizing discrepancy across the hypercube samples. Discrepancy can be quantitatively measured using the deviations of sampled points from a uniform distribution :cite:`kucherenko2015exploring, dalal2008low`. Low-discrepancy sequences ensure that the number of samples in any subspace of the variability hypercube is approximately the same.
This is not something guaranteed by Latin Hypercube sampling, and even though its design can be improved through optimization with various criteria, such adjustments are limited to small sample sizes and low dimensions :cite:`iooss2010numerical,jin2003efficient,kucherenko2015exploring,morris1995exploratory,park1994optimal`. In contrast, the Sobol sequence :cite:p:`sobol1976uniformly,sobol1967distribution`, one of the most widely used sampling techniques, utilizes the low-discrepancy approach to uniformly fill the sampled factor space. A core advantage of this style of sampling is that it takes far fewer samples (i.e., simulations) to attain a much lower level of error in estimating model output statistics (e.g., the mean and variance of outputs). + +.. note:: + + Put this into practice! Click the following link to try out an interactive tutorial which uses Sobol sequence sampling for the purposes of a Sobol sensitivity analysis: `Sobol SA using SALib Jupyter Notebook `_ diff --git a/dev/docs/html/_sources/3.3.5_other_types_of_sampling.rst b/dev/docs/html/_sources/3.3.5_other_types_of_sampling.rst new file mode 100644 index 0000000..2f947b6 --- /dev/null +++ b/dev/docs/html/_sources/3.3.5_other_types_of_sampling.rst @@ -0,0 +1,4 @@ +Other types of sampling +*********************** + +The sampling techniques mentioned so far are general sampling methods useful for a variety of applications beyond sensitivity analysis. There are however techniques that have been developed for specific sensitivity analysis methods. Examples of these methods include the Morris One-At-a-Time :cite:p:`morris1991factorial`, Fourier Amplitude Sensitivity Test (FAST; :cite:p:`cukier1973study`), Extended FAST :cite:p:`saltelli1999quantitative`, and Extended Sobol methods :cite:p:`sobol2001global`. For example, the Morris sampling strategy builds a number of trajectories (usually referred to as repetitions and denoted by :math:`r`) in the input space each composed of :math:`N+1` factor points, where :math:`N` is the number of uncertain factors. The first point of the trajectory is selected randomly and the subsequent :math:`N` points are generated by moving one factor at a time by a fixed amount. Each factor is perturbed once along the trajectory, while the starting points of all of the trajectories are randomly and uniformly distributed. Several variations of this strategy also exist in the literature; for more details on each approach and their differences the reader is directed to :cite:t:`pianosi2016sensitivity`. diff --git a/dev/docs/html/_sources/3.3.6_synthetic_generation_of_input_time_series.rst b/dev/docs/html/_sources/3.3.6_synthetic_generation_of_input_time_series.rst new file mode 100644 index 0000000..6d283b2 --- /dev/null +++ b/dev/docs/html/_sources/3.3.6_synthetic_generation_of_input_time_series.rst @@ -0,0 +1,4 @@ +Synthetic generation of input time series +***************************************** + +Models often have input time series or processes with strong temporal and/or spatial correlations (e.g., streamflow, energy demand, pricing of commodities, etc.) that, while they might not immediately come to mind as factors to be examined in sensitivity analysis, can be treated as such. Synthetic input time series are used for a variety of reasons, for example, when observations are not available or are limited, or when past observations are not considered sufficiently representative to capture rare or extreme events of interest :cite:p:`herman2016synthetic,milly2008stationarity`. 
Synthetic generation of input time series provides a valuable tool to consider non-stationarity and incorporate potential stressors, such as climate change impacts into input time series :cite:p:`borgomeo2015numerical`. For example, a century of record will be insufficient to capture very high impact rare extreme events (e.g., persistent multi-year droughts). A large body of statistical literature exists focusing on the topics of synthetic weather :cite:p:`herrera2017review,wilks1999weather` and streamflow :cite:p:`lamontagne2018generating,medda2019comparison` generation that provides a rich suite of approaches for developing history-informed, well-characterized stochastic process models to better estimate rare individual or compound (hot, severe drought) extremes. It is beyond the scope of this text to review these methods, but readers are encouraged to explore the studies cited above as well as the following publications for discussions and comparisons of these methods: :cite:p:`borgomeo2015numerical,herman2016synthetic,kirsch2013evaluating,loucks2017water,steinschneider2015integrated,vogel2017stochastic,vogel1988value`. The use of these methods for the purposes of exploratory modeling, especially in the context of well-characterized versus deep uncertainty, is further discussed in :numref:`consequential_scenarios`. diff --git a/dev/docs/html/_sources/3.3_design_of_experiments.rst b/dev/docs/html/_sources/3.3_design_of_experiments.rst new file mode 100644 index 0000000..535e274 --- /dev/null +++ b/dev/docs/html/_sources/3.3_design_of_experiments.rst @@ -0,0 +1,46 @@ +.. _design_of_experiments: + +Design of Experiments +##################### + +Before conducting a sensitivity analysis, the first element that needs to be clarified is the uncertainty space of the model :cite:p:`helton2006survey,pianosi2016sensitivity`. In other words, how many and which factors making up the mathematical model are considered uncertain and can potentially affect the model output and the inferences drawn from it. Uncertain factors can be model parameters, model structures, inputs, or alternative model resolution levels (scales), all of which can be assessed through the tools presented in this text. Depending on the kind of factor, its variability can be elicited through various means: expert opinion, values reported in the literature, historical observations, its physical meaning (e.g., population values in a city can never be negative), or through the use of more formal UQ methods (:numref:`A1_uncertainty_quantification`). The model uncertainty space represents the entire space of variability present in each of the uncertain factors of a model. The complexity of most real-world models means that the response function, :math:`y=g(x)`, mapping inputs to outputs, is hardly ever available in an analytical form and therefore analytically computing the sensitivity of the output to each uncertain factor becomes impossible. In these cases, sensitivity analysis is only feasible through numerical procedures that employ different strategies to sample the uncertainty space and calculate sensitivity indices. + +A sampling strategy is often referred to as a *design of experiments* and represents a methodological choice made before conducting any sensitivity analysis. Experimental design was first introduced by :cite:t:`fisher1936design` in the context of laboratory or field-based experiments. 
Its application in sensitivity analysis is similar to setting up a physical experiment in that it is used to discover the behavior of a system under specific conditions. An ideal design of experiments should provide a framework for the extraction of all plausible information about the impact of each factor on the output of the model. The design of experiments is used to set up a simulation platform with the minimum computational cost to answer specific questions that cannot be readily drawn from the data through analytical or common data mining techniques. Models representing coupled human-natural systems usually have a large number of inputs, state variables and parameters, but not all of them exert fundamental control over the numerical process, despite their uncertainty, nor have substantial impacts on the model output, either independently or through their interactions. Each factor influences the model output in different ways that need to be discovered. For example, the influence of a parameter on model output can be linear or non-linear and can be continuous or only be active during specific times or at particular states of the system :cite:p:`herman2013time, massmann2014new`. An effective and efficient design of experiments allows the analyst to explore these complex relationships and evaluate different behaviors of the model for various scientific questions :cite:p:`van2016sensitivity`. The rest of this section overviews some of the most commonly used designs of experiments. Table 1 summarizes the designs discussed. + +.. list-table:: Summary of designs of experiments overviewed in this section. * Depends on the sample size. + :header-rows: 1 + + * - *Design of experiments* + - *Factor interactions considered* + - *Treatment of factor domains* + * - One-At-a-Time (OAT) + - No - main effects only + - Continuous (distributions) + * - Full Factorial Sampling + - Yes - including total effects + - Discrete (levels) + * - Fractional Factorial Sampling + - Yes - only lower-order effects* + - Discrete (levels) + * - Latin Hypercube (LH) Sampling + - Yes - including total effects* + - Continuous (distributions) + * - Quasi-Random Sampling with Low-Discrepancy Sequences + - Yes - including total effects* + - Continuous (distributions) + +There are a few different approaches to the design of experiments, closely related to the chosen sensitivity analysis approach, which is in turn shaped by the research motivations, scientific questions, and computational constraints at hand (additional discussion of this can be found at the end of :numref:`3_sensitivity_analysis_the_basics`). For example, in a sensitivity analysis using perturbation and derivatives methods, the model input parameters vary from their nominal values one at a time, something that the design of experiments needs to reflect. If, instead, one were to perform sensitivity analysis using a multiple-starts perturbation method, the design of experiments needs to consider that multiple points across the factor space are used. The design of experiments specifically defines two key characteristics of samples that are fed to the numerical model: the number of samples and the range of each factor. + +Generally, sampling can be performed randomly or by applying a stratifying approach. In random sampling, such as Monte Carlo :cite:p:`metropolis1949monte`, samples are randomly generated by a pseudo-random number generator with an a-priori assumption about the distribution of parameters and their possible ranges. 
Random seeds can also be used to ensure consistency and higher control over the random process. However, this method could leave some gaps in the parameter space and cause clustering in some regions, especially for a large number of parameters :cite:p:`norton2015introduction`. Most sampling strategies use stratified sampling to mitigate these disadvantages. Stratified sampling techniques divide the domain of each factor into subintervals, often of equal lengths. From each subinterval, an equal number of samples is drawn randomly, or based on the specific locations within the subintervals :cite:p:`saltelli2008global`. + +.. include:: 3.3.1_one_at_a_time_oat.rst + +.. include:: 3.3.2_full_fractional_factorial_sampling.rst + +.. include:: 3.3.3_latin_hypercube_sampling.rst + +.. include:: 3.3.4_low_discrepancy_sequences.rst + +.. include:: 3.3.5_other_types_of_sampling.rst + +.. include:: 3.3.6_synthetic_generation_of_input_time_series.rst diff --git a/dev/docs/html/_sources/3.4.1_derivative_based_methods.rst b/dev/docs/html/_sources/3.4.1_derivative_based_methods.rst new file mode 100644 index 0000000..27bb1bf --- /dev/null +++ b/dev/docs/html/_sources/3.4.1_derivative_based_methods.rst @@ -0,0 +1,16 @@ +Derivative-based Methods +************************ + +Derivative-based methods explore how model outputs are affected by perturbations in a single model input around a particular input value. These methods are local and are performed using OAT sampling. For simplicity of mathematical notation, let us assume that the model :math:`g(X)` only returns one output. Following :cite:p:`borgonovo2008sensitivity` and :cite:p:`pianosi2016sensitivity`, the sensitivity index, :math:`S_i`, of the model’s *i*-th input factor, :math:`x_i`, can be measured using the partial derivative evaluated at a nominal value, :math:`\bar{x}`, of the vector of inputs: + +.. math:: + + S_i (\bar{x}) = \left. \frac{\partial g}{\partial x_i} \right|_{\bar{x}} c_i + +where :math:`c_i` is the scaling factor. In most applications, however, the relationship :math:`g(X)` is not fully known in its analytical form, and therefore the above partial derivative is usually approximated: + +.. math:: + + S_i (\bar{x}) = \frac{g(\bar{x}_1,...\bar{x}_i+\Delta_i,...\bar{x}_N)-g(\bar{x}_1,...\bar{x}_i,...\bar{x}_N)}{\Delta_i}c_i + +Using this approximation, the *i*-th input factor is perturbed by a magnitude of :math:`\Delta_i`, and its relative importance is calculated. Derivative-based methods are some of the oldest sensitivity analysis methods and are computationally cheap, as they only require :math:`N+1` model evaluations to estimate indices for :math:`N` uncertain factors. This low computational cost comes at the price of only exploring (local) perturbations around the nominal value rather than the entire input space. Additionally, as these methods examine the effects of each input factor one at a time, they cannot assess parametric interactions or capture the interacting nature of many real systems and the models that abstract them. 
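+
+To make the approximation above concrete, the sketch below estimates local derivative-based indices with the usual :math:`N+1` model evaluations. The two-input model, the nominal point, the perturbation size, and the unit scaling factors are all assumptions made for illustration.
+
+.. code-block:: python
+
+    # Sketch of derivative-based (local, OAT) sensitivity indices via finite differences.
+    import numpy as np
+
+    def g(x):
+        # Hypothetical model with two inputs (assumed for illustration)
+        return x[0] ** 2 + 3.0 * x[0] * x[1]
+
+    x_nominal = np.array([1.0, 2.0])   # nominal (reference) values, x-bar
+    delta = 1e-6                       # perturbation size for each factor
+    c = np.ones_like(x_nominal)        # scaling factors c_i, taken as 1 in this sketch
+
+    y_nominal = g(x_nominal)           # one baseline evaluation
+    S = np.empty_like(x_nominal)
+    for i in range(x_nominal.size):    # plus N perturbed evaluations (N + 1 total)
+        x_perturbed = x_nominal.copy()
+        x_perturbed[i] += delta
+        S[i] = (g(x_perturbed) - y_nominal) / delta * c[i]
+
+    print(S)  # approximate partial derivatives of g at the nominal point
+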
diff --git a/dev/docs/html/_sources/3.4.2_elementary_effect_methods.rst b/dev/docs/html/_sources/3.4.2_elementary_effect_methods.rst new file mode 100644 index 0000000..f3bc67c --- /dev/null +++ b/dev/docs/html/_sources/3.4.2_elementary_effect_methods.rst @@ -0,0 +1,29 @@ +Elementary Effect Methods +************************* + +Elementary effect (EE) SA methods provide a solution to the local nature of the derivative-based methods by exploring the entire parametric range of each input parameter :cite:p:`iooss2015review`. However, EE methods still use OAT sampling and do not vary all input parameters simultaneously while exploring the parametric space. The OAT nature of EE methods therefore prevents them from properly capturing the interactions between uncertain factors. EE methods are computationally efficient compared to their All-At-a-Time (AAT) counterparts, making them more suitable when computational capacity is a limiting factor, while still allowing for some inferences regarding factor interactions. +The most popular EE method is the Method of Morris :cite:p:`morris1991factorial`. Following the notation by :cite:p:`pianosi2016sensitivity`, this method calculates global sensitivity using the mean of the EEs (finite differences) of each parameter at different locations: + +.. math:: + + S_i = \mu_i^* = \frac{1}{r}\sum_{j=1}^r EE^j_i = \frac{1}{r}\sum_{j=1}^r \frac{g(\bar{x}_1,...\bar{x}_i+\Delta_i,...\bar{x}_N)-g(\bar{x}_1,...\bar{x}_i,...\bar{x}_N)}{\Delta_i}c_i + + +with :math:`r` representing the number of sample repetitions (also referred to as trajectories) in the input space, usually set between 4 and 10 :cite:`saltelli2004sensitivity`. Each :math:`x_j` represents the points of trajectory :math:`j`, with :math:`j=1,...,r`, selected as described in the sampling strategy for this method, found above. This method also produces the standard deviation of the EEs: + +.. math:: + + \sigma_i = \sqrt{\frac{1}{r}\sum_{j=1}^r(EE_i^j-\frac{1}{r}\sum_{j=1}^r EE^j_i)^2} + + +which is a measure of parametric interactions. Higher values of :math:`\sigma_i` suggest that model responses at different levels of factor :math:`x_i` are significantly different, which indicates considerable interactions between that and other uncertain factors. The values of :math:`\mu_i^*` and :math:`\sigma_i` for each factor allow us to draw several different conclusions, illustrated in :numref:`Figure_3_4`, following the example by :cite:`iooss2015review`. In this example, factors :math:`x_1`, :math:`x_2`, :math:`x_4`, and :math:`x_5` can be said to have an influence on the model outputs, with :math:`x_1`, :math:`x_4`, and :math:`x_5` having some interactive or non-linear effects. Depending on the orders of magnitude of :math:`\mu_i^*` and :math:`\sigma_i`, one can indirectly deduce whether the factors have strong interactive effects; for example, if a factor has :math:`\sigma_i \ll \mu_i^*`, then the relationship between that factor and the output can be assumed to be largely linear (note that this is still an OAT method and assumptions on factor interactions should be strongly caveated). Extensions of the Method of Morris have also been developed specifically for the purposes of factor fixing and explorations of parametric interactions (e.g., :cite:p:`borgonovo2010sensitivity,campolongo1999use,cropp2002new`). + + +.. _Figure_3_4: +.. figure:: _static/figure3_4_morris_method.png + :alt: Figure 3.4 + :width: 700px + :figclass: margin-caption + :align: center + + Illustrative results of the Morris Method. 
Factors :math:`x_1`, :math:`x_2`, :math:`x_4`, and :math:`x_5` have an influence on the model outputs, with :math:`x_1`, :math:`x_4`, and :math:`x_5` having interactive or non-linear effects. Whether or not a factor should be considered influential to the output depends on the output selected and is specific to the research context and purpose of the analysis, as discussed in :numref:`why_SA`. diff --git a/dev/docs/html/_sources/3.4.3_regression_based_methods.rst b/dev/docs/html/_sources/3.4.3_regression_based_methods.rst new file mode 100644 index 0000000..722cb9c --- /dev/null +++ b/dev/docs/html/_sources/3.4.3_regression_based_methods.rst @@ -0,0 +1,27 @@ +Regression-based Methods +************************ +Regression analysis is one of the oldest ways of investigating parametric importance and sensitivity :cite:p:`saltelli2004sensitivity`. Here, we describe some of the most popular regression-based sensitivity indices. One of the main sensitivity indices of this category is the standardized regression coefficient (SRC). To calculate SRC, a linear regression relationship needs to be fitted between the input vector, :math:`x`, and the model output of interest by using a least-squares minimization method: + +.. math:: + y = b_0 + \sum_{i=1}^N b_ix_i + +where :math:`b_0` and :math:`b_i` (corresponding to the *i*-th model input) are regression coefficients. The following relationship can then be used to calculate the SRCs for different input values: + +.. math:: + S_i=SRC_i=b_i\frac{\sigma_i}{\sigma_y} + +where :math:`\sigma_i` and :math:`\sigma_y` are the standard deviations of the *i*-th model input and the output, respectively. + +Several other regression-based indices explore the correlation between input and output parameters as a proxy for model parametric sensitivity :cite:`helton1993uncertainty,iooss2015review,manache2008identification`. The Pearson correlation coefficient (PCC) can be used when a linear relationship exists between an uncertain factor, :math:`x_i`, and the output :math:`y`: + +.. math:: + S_i=PCC=\frac{cov(x_i,y)}{\sigma_i\sigma_y} + +In cases when there are outliers in the data or the relationship between the uncertain factors and the output is not linear, rank-based correlation coefficients are preferred, for example, Spearman’s rank correlation coefficient (SRCC): + +.. math:: + S_i=SRCC=\frac{cov(rx_i,ry)}{\sigma_{rx_i}\sigma_{ry}} + +where the raw values of :math:`x_i` and :math:`y` are converted to ranks :math:`rx_i` and :math:`ry`, respectively, so that the index measures the strength of the monotonic, rather than linear, relationship between the input and output. Other regression-based metrics include the partial correlation coefficient, the partial rank correlation coefficient, and the Nash-Sutcliffe coefficient, more discussion on which can be found in :cite:p:`iooss2015review,borgonovo2016sensitivity`. + +Tree-based regression techniques have also been used for sensitivity analysis in an effort to address the challenges faced with nonlinear models :cite:p:`pappenberger2006ignorance`. Examples of these methods include the Patient Rule Induction Method (PRIM; :cite:p:`friedman1999bump`) and Classification And Regression Trees (CART; :cite:p:`breiman1984classification`). CART-based approaches also include boosting and bagging extensions :cite:p:`freund1999short,breiman1996bagging`. 
These methods are particularly useful when sensitivity analysis is used for factor mapping (i.e., when trying to identify which uncertain model factors produce a certain model behavior). :numref:`consequential_scenarios` elaborates on the use of these methods. Regression-based sensitivity analysis methods are global by nature and can explore the entire space of variables. However, the true level of comprehensiveness depends on the design of experiments and the number of simulations providing data to establish the regression relationships. Although they are usually computationally efficient, they do not produce significant information about parametric interactions :cite:p:`borgonovo2016sensitivity,saltelli2004sensitivity`. diff --git a/dev/docs/html/_sources/3.4.4_regional_sensitivity_analysis.rst b/dev/docs/html/_sources/3.4.4_regional_sensitivity_analysis.rst new file mode 100644 index 0000000..953b634 --- /dev/null +++ b/dev/docs/html/_sources/3.4.4_regional_sensitivity_analysis.rst @@ -0,0 +1,10 @@ +Regional Sensitivity Analysis +***************************** +Another method primarily applied for basic factor mapping applications is Regional Sensitivity Analysis (RSA; :cite:p:`hornberger1981approach`). RSA is a global sensitivity analysis method that is typically implemented using standard sampling methods such as latin hypercube sampling. It is performed by specifying a condition on the output space (e.g., an upper threshold) and classifying outputs that meet the condition as behavioral and the ones that fail it as non-behavioral (illustrated in :numref:`Figure_3_2` (b)). Note that the specified threshold depends on the nature of the problem, model, and the research question. It can reflect model-performance metrics (such as errors) or consequential decision-relevant metrics (such as unacceptable system outcomes). The behavioral and non-behavioral outputs are then traced back to their originating sampled factors, where differences between the distributions of samples can be used to determine their significance in producing each part of the output. The Kolmogorov-Smirnov divergence is commonly used to quantify the difference between the distribution of behavioral and non-behavioral parameters :cite:p:`pianosi2016sensitivity`: + +.. math:: + S_i=|F_{x_i|y_b} (y \in Y_b)-F_{x_i|y_{nb}} (y \in Y_{nb})| + +where :math:`Y_b` represents the set of behavioral outputs, and :math:`F_{x_i|y_b}` is the empirical cumulative distribution function of the values of :math:`x_i` associated with values of :math:`y` that belong in the behavioral set. The :math:`nb` notation indicates the equivalent elements related to the non-behavioral set. Large differences between the two distributions indicate stronger effects by the parameters on the respective part of the output space. + +Used in a factor mapping setting, RSA can be applied for scenario discovery :cite:p:`lempert2006general,groves2007new`, the Generalized Likelihood Uncertainty Estimation method (GLUE; :cite:p:`beven_future_1992,beven2014glue,blasone2008generalized`) and other hybrid sensitivity analysis methods (e.g., :cite:p:`cryer1999regional,wei2013monte`). The fundamental shortcomings of RSA are that, in some cases, it could be hard to interpret the difference between behavioral and non-behavioral sample sets, and that insights about parametric correlations and interactions cannot always be uncovered :cite:p:`saltelli2004sensitivity`. 
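+
+As a rough illustration of the classification step described above, the sketch below splits a pre-existing sample into behavioral and non-behavioral sets using a placeholder output threshold and compares the per-factor sample distributions with the two-sample Kolmogorov-Smirnov statistic; all names, values, and the threshold are assumptions made only for illustration.
+
+.. code-block:: python
+
+    # Hedged sketch of Regional Sensitivity Analysis: classify outputs against a
+    # threshold and compare the factor distributions of the two resulting sets.
+    import numpy as np
+    from scipy.stats import ks_2samp
+
+    rng = np.random.default_rng(0)
+    X = rng.uniform(size=(5000, 2))     # placeholder Latin-hypercube-like sample
+    y = X[:, 0] ** 2 + 0.1 * X[:, 1]    # placeholder model output
+
+    behavioral = y < 0.25               # placeholder output condition
+    for i, name in enumerate(["x1", "x2"]):
+        stat, _ = ks_2samp(X[behavioral, i], X[~behavioral, i])
+        print(f"{name}: KS distance = {stat:.2f}")
+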
For more elaborate discussions and illustrations of the RSA method, readers are directed to :cite:t:`saltelli2008global,tang2007comparing,young1999data` and references therein. diff --git a/dev/docs/html/_sources/3.4.5_variance_based_methods.rst b/dev/docs/html/_sources/3.4.5_variance_based_methods.rst new file mode 100644 index 0000000..73ba3ba --- /dev/null +++ b/dev/docs/html/_sources/3.4.5_variance_based_methods.rst @@ -0,0 +1,32 @@ +.. _variance-based_methods: + +Variance-based Methods +********************** +Variance-based sensitivity analysis methods hypothesize that various specified model factors contribute differently to the variation of model outputs; therefore, decomposition and analysis of output variance can determine a model’s sensitivity to input parameters :cite:p:`sobol2001global,saltelli2004sensitivity`. The most popular variance-based method is the Sobol method, which is a global sensitivity analysis method that takes into account complex and nonlinear factor interactions when calculating sensitivity indices, and employs more sophisticated sampling methods (e.g., the Sobol sampling method). The Sobol method is able to calculate three types of sensitivity indices that provide different types of information about model sensitivities. These indices include first-order, higher-order (e.g., second-, third-, etc. orders), and total-order sensitivities. + +The first-order sensitivity index indicates the percent of model output variance contributed by a factor individually (i.e., the effect of varying :math:`x_i` alone) and is obtained using the following :cite:p:`sobol2001global,saltelli2002making`: + +.. math:: + S_i^1=\frac{V_{x_i}[E_{x_{\sim i}}(x_i)]}{V(y)} + +with :math:`E` and :math:`V` denoting the expected value and the variance, respectively. :math:`x_{\sim i}` denotes all factors except for :math:`x_i`. The first-order sensitivity index (:math:`S_i^1`) can therefore also be thought of as the portion of total output variance (:math:`V_y`) that can be reduced if the uncertainty in factor :math:`x_i` is eliminated :cite:p:`saltelli2002sensitivity`. First-order sensitivity indices are usually used to understand the independent effect of a factor and to distinguish its individual versus interactive influence. Linearly independent factors would be expected to have only first-order indices (no interactions), which should correspond well with sensitivities obtained from simpler methods using OAT sampling. + +Higher-order sensitivity indices explore the interaction between two or more parameters that contribute to model output variations. For example, a second-order index indicates how interactions between a pair of factors can lead to change in model output variance and is calculated using the following relationship: + +.. math:: + S_{ij}^2=\frac{V_{x_{i,j}}[E_{x_{\sim i,j}}(x_i,x_j)]}{V(y)} + +with :math:`i \ne j`. Higher-order indices can be calculated by similar extensions (i.e., fixing additional factors together), but this is usually computationally expensive in practice. + +The total sensitivity analysis index represents the entire influence of an input factor on model outputs including all of its interactions with other factors :cite:p:`homma1996importance`. In other words, total-order indices include first-order and all higher-order interactions associated with each factor and can be estimated using the following: + +.. math:: + S_i^T= \frac{E_{x_{\sim i}}[V_{x_i}(x_{\sim i})]}{V(y)} = 1 - \frac{V_{x_{\sim i}}[E_{x_{i}}(x_{\sim i})]}{V(y)} + +This index reveals the expected portion of variance that remains if uncertainty is eliminated in all factors but :math:`x_i` :cite:p:`saltelli2002sensitivity`. The total sensitivity index is the overall best measure of sensitivity as it captures the full individual and interactive effects of model factors. + +Besides the Sobol method, there are some other variance-based sensitivity analysis methods, such as the Fourier amplitude sensitivity test (FAST; :cite:`cukier1973study, mcrae1982development`) and extended-FAST :cite:`saltelli1998alternative,vazquez2014global`, that have been used by the scientific community. However, Sobol remains by far the most common method of this class. Variance-based techniques have been widely used and have proved to be powerful in a variety of applications. Despite their popularity, some authors have expressed concerns about the methods’ appropriateness in some settings. Specifically, the presence of heavy-tailed distributions or outliers, or of multimodal model outputs, can bias the sensitivity indices produced by these methods :cite:p:`auder2008global,zadeh2017comparison,pianosi2015simple`. Moment-independent measures, discussed below, attempt to overcome these challenges. + +.. note:: + + Put this into practice! Click the following link to try out an interactive tutorial which demonstrates the application of a Sobol sensitivity analysis: `Sobol SA using SALib Jupyter Notebook `_ diff --git a/dev/docs/html/_sources/3.4.6_analysis_of_variance_anova.rst b/dev/docs/html/_sources/3.4.6_analysis_of_variance_anova.rst new file mode 100644 index 0000000..b359cd8 --- /dev/null +++ b/dev/docs/html/_sources/3.4.6_analysis_of_variance_anova.rst @@ -0,0 +1,3 @@ +Analysis of Variance (ANOVA) +**************************** +Analysis of Variance (ANOVA) was first introduced by :cite:t:`fisher1934statistical` and has since become a popular factor analysis method in physical experiments. ANOVA can be used as a sensitivity analysis method in computational experiments with a factorial design of experiment (referred to as factorial ANOVA). Note that Sobol can also be categorized as an ANOVA sensitivity analysis method, and that is why Sobol is sometimes referred to as a functional ANOVA :cite:p:`brevault2013comparison`. Factorial ANOVA methods are particularly suited for models and problems that have discrete input spaces, significantly reducing the computational time. More information about these methods can be found in :cite:p:`archer1997sensitivity,brevault2013comparison,owen2013variance`. \ No newline at end of file diff --git a/dev/docs/html/_sources/3.4.7_moment_independent_density_based_methods.rst b/dev/docs/html/_sources/3.4.7_moment_independent_density_based_methods.rst new file mode 100644 index 0000000..9607e68 --- /dev/null +++ b/dev/docs/html/_sources/3.4.7_moment_independent_density_based_methods.rst @@ -0,0 +1,8 @@ +Moment-Independent (Density-Based) Methods +****************************************** +These methods typically compare the entire distribution (i.e., not just the variance) of input and output parameters in order to determine the sensitivity of the output to a particular input variable. Several moment-independent sensitivity analysis methods have been proposed in recent years. 
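+
+One widely used measure of this class, the :math:`\delta` index described next, is implemented in SALib and can be applied to any pre-existing sample; the sketch below is a minimal, hedged illustration in which the problem definition, sample, and toy model are all placeholders.
+
+.. code-block:: python
+
+    # Hedged sketch of SALib's delta moment-independent analysis on a
+    # pre-existing sample; problem definition and model are placeholders.
+    import numpy as np
+    from SALib.analyze import delta
+
+    problem = {
+        "num_vars": 2,
+        "names": ["x1", "x2"],
+        "bounds": [[0.0, 1.0], [0.0, 1.0]],
+    }
+
+    rng = np.random.default_rng(0)
+    X = rng.uniform(size=(2000, 2))     # any pre-existing sample can be used
+    Y = X[:, 0] ** 2 + 0.5 * X[:, 1]    # placeholder model output
+
+    results = delta.analyze(problem, X, Y, print_to_console=False)
+    print(results["delta"], results["S1"])
+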
The delta (:math:`\delta`) moment-independent method calculates the difference between unconditional and conditional cumulative distribution functions of the output. The method was first introduced by :cite:`borgonovo2006measuring,borgonovo2007new` and has become widely used in various disciplines. The :math:`\delta` sensitivity index is defined as follows: + +.. math:: + S_i=\delta_i=\frac{1}{2}E_{x_i}\left[\int|f_y(y)-f_{y|x_i}(y)|dy\right] + +where :math:`f_y(y)` is the probability density function of the entire model output :math:`y`, and :math:`f_{y|x_i}(y)` is the conditional density of :math:`y`, given that factor :math:`x_i` assumes a fixed value. The :math:`\delta_i` sensitivity indicator therefore represents the normalized expected shift in the distribution of :math:`y` provoked by :math:`x_i`. Moment-independent methods are advantageous in cases where we are concerned about the entire distribution of events, such as when uncertain factors lead to more extreme events in a system :cite:p:`hadjimichael_advancing_2020`. Further, they can be used with a pre-existing sample of data, without requiring a specific sampling scheme, unlike the previously reviewed methods :cite:p:`plischke2013global`. The :math:`\delta` sensitivity index does not include interactions between factors and it is therefore akin to the first-order index produced by the Sobol method. Interactions between factors can still be estimated using this method, by conditioning the calculation on more than one uncertain factor being fixed :cite:p:`borgonovo2007new`. diff --git a/dev/docs/html/_sources/3.4_sensitivity_analysis_methods.rst b/dev/docs/html/_sources/3.4_sensitivity_analysis_methods.rst new file mode 100644 index 0000000..87aa851 --- /dev/null +++ b/dev/docs/html/_sources/3.4_sensitivity_analysis_methods.rst @@ -0,0 +1,20 @@ +.. _sensitivity_analysis_methods: + +Sensitivity Analysis Methods +############################ + +In this section, we describe some of the most widely applied sensitivity analysis methods along with their mathematical definitions. We also provide a detailed discussion on applying each method, as well as a comparison of their features and limitations. + +.. include:: 3.4.1_derivative_based_methods.rst + +.. include:: 3.4.2_elementary_effect_methods.rst + +.. include:: 3.4.3_regression_based_methods.rst + +.. include:: 3.4.4_regional_sensitivity_analysis.rst + +.. include:: 3.4.5_variance_based_methods.rst + +.. include:: 3.4.6_analysis_of_variance_anova.rst + +.. include:: 3.4.7_moment_independent_density_based_methods.rst diff --git a/dev/docs/html/_sources/3.5_how_to_choose_a_sensitivity_analysis_method_model_traits_and_dimensionality.rst b/dev/docs/html/_sources/3.5_how_to_choose_a_sensitivity_analysis_method_model_traits_and_dimensionality.rst new file mode 100644 index 0000000..3413145 --- /dev/null +++ b/dev/docs/html/_sources/3.5_how_to_choose_a_sensitivity_analysis_method_model_traits_and_dimensionality.rst @@ -0,0 +1,17 @@ +How To Choose A Sensitivity Analysis Method: Model Traits And Dimensionality +############################################################################ + +:numref:`Figure_3_5`, synthesized from variants found in :cite:p:`iooss2015review, pianosi2016sensitivity`, presents a graphical classification of the methods overviewed in this section, with regards to their appropriateness of application based on the complexity of the model at hand and the computational limits on the number of model evaluations afforded. 
The bars below each method also indicate the sensitivity analysis purposes they are most appropriate to address, which in turn reflect the motivations and research questions the analysis is meant to answer. Computational intensity is measured as a multiple of the number of model factors that are considered uncertain (:math:`d`). Increasing model complexity mandates that more advanced sensitivity analysis methods are applied to address potential nonlinearities, factor interactions, and discontinuities. Such methods can only be performed at increasing computational expense. For example, computationally cheap linear regression should not be used to assess factors’ importance if the model cannot be proven linear and the factors independent, because important relationships will invariably be missed (recall the example in :numref:`Figure_3_5`). When computational limits do constrain applications to simplified assumptions and sensitivity techniques, any conclusions should be delivered with clear statements of the appropriate caveats. + +.. _Figure_3_5: +.. figure:: _static/figure3_5classificationofmethods.png + :alt: Figure 3_5 + :width: 700px + :figclass: margin-caption + :align: center + + Classification of the sensitivity analysis methods overviewed in this section, with regards to their computational cost (horizontal axis), their appropriateness to model complexity (vertical axis), and the purpose they can be used for (colored bars). d: number of uncertain factors considered; ANOVA: Analysis of Variance; FAST: Fourier Amplitude Sensitivity Test; PRIM: Patient Rule Induction Method; CART: Classification and Regression Trees; SRCC: Spearman’s rank correlation coefficient; NSE: Nash–Sutcliffe efficiency; SRC: standardized regression coefficient; PCC: Pearson correlation coefficient. This figure is synthesized from variants found in :cite:p:`iooss2015review, pianosi2016sensitivity`. + +The reader should also be aware that the estimates of computational intensity that are given here are indicative of magnitude and would vary depending on the sampling technique, model complexity, and the level of information being sought. For example, a Sobol sensitivity analysis typically requires a sample of size :math:`n \times (d+2)` to produce first- and total-order indices, where :math:`d` is the number of uncertain factors and :math:`n` is a scaling factor, selected ad hoc, depending on model complexity :cite:p:`saltelli2002relative`. The scaling factor :math:`n` is typically set to at least 1000, but it should most appropriately be set on the basis of index convergence. In other words, a prudent analyst would perform the analysis several times with increasing :math:`n` and observe at what level the indices converge to stable values :cite:p:`NOSSENT20111515`. That level should then serve as the minimum sample size used in subsequent sensitivity analyses of the same system. Furthermore, if the analyst would like to better understand the degrees of interaction between factors, requiring second-order indices, the sample size would have to increase to :math:`n \times (2d+2)` :cite:p:`saltelli2002relative`. + +Another important consideration is that methods that do not require specific sampling schemes can be performed in conjunction with others without requiring additional model evaluations. None of the regression-based methods, for example, require samples of specific structures or sizes, and can be combined with other methods for complementary purposes. 
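+
+To ground both points above, a hedged SALib-based sketch is shown below: the Saltelli design produces :math:`n \times (d+2)` rows for first- and total-order Sobol indices, and the evaluated sample can afterwards be reused by post-hoc techniques such as regression- or tree-based screening. The bounds, toy model, and factor names are placeholders for illustration only.
+
+.. code-block:: python
+
+    # Hedged sketch: a Saltelli/Sobol design of size n*(d+2) and the resulting
+    # first- and total-order indices. Bounds and the toy model are placeholders.
+    from SALib.sample import saltelli
+    from SALib.analyze import sobol
+
+    problem = {
+        "num_vars": 3,
+        "names": ["x1", "x2", "x3"],
+        "bounds": [[0.0, 1.0], [0.0, 1.0], [0.0, 1.0]],
+    }
+
+    n = 1024                                                  # increase and check index convergence
+    X = saltelli.sample(problem, n, calc_second_order=False)  # n * (d + 2) rows
+    Y = X[:, 0] + 2.0 * X[:, 1] * X[:, 2]                     # placeholder model evaluations
+
+    Si = sobol.analyze(problem, Y, calc_second_order=False)
+    print(Si["S1"], Si["ST"])
+    # The evaluated (X, Y) pairs can then be reused by regression- or tree-based
+    # screens without additional model runs.
+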
For instance, one could complement a Sobol analysis with an application of CART, using the same data, but to address questions relating to factor mapping (e.g., we know factor :math:`x_i` is important for a model output, but we would like to also know which of its values specifically push the output to undesirable states). Lastly, comparing results from different methods performed together can be especially useful in model diagnostic settings. For example, :cite:p:`hadjimichael_advancing_2020` used :math:`\delta` indices, first-order Sobol indices, and :math:`R^2` values from linear regression, all performed on the same factors, to derive insights about the effects of factors on different moments of the output distribution and about the linearity of their relationship. diff --git a/dev/docs/html/_sources/3.6_software_toolkits.rst b/dev/docs/html/_sources/3.6_software_toolkits.rst new file mode 100644 index 0000000..0b42a98 --- /dev/null +++ b/dev/docs/html/_sources/3.6_software_toolkits.rst @@ -0,0 +1,19 @@ +.. _software_toolkits: + +Software Toolkits +################# + +This section presents available open-source sensitivity analysis software tools, based on the programming language they use and the methods they support (:numref:`Figure_3_6`). Our review covers five widely used programming languages: R, MATLAB, Julia, Python, and C++, as well as one tool that provides a graphical user interface (GUI). Each available SA tool was assessed on the number of SA methods and design of experiments methods it supports. For example, the *sensobol* package in R only supports the variance-based Sobol method. However, it is the only package we came across that calculates third-order interactions among parameters. On the other end of the spectrum, there are SA software packages that contain several popular SA methods. For example, *SALib* in Python :cite:`herman2017salib` supports seven different SA methods. The *DifferentialEquations* package is a comprehensive suite developed for Julia, and *GlobalSensitivityAnalysis* is another Julia package that has mostly adapted SALib methods. :numref:`Figure_3_6` also identifies the SA packages that have been updated since 2018, indicating active support and development. + + +.. _Figure_3_6: +.. figure:: _static/figure3_6_softwaretoolkits.png + :alt: Figure 3_6 + :width: 700px + :figclass: margin-caption + :align: center + + Sensitivity analysis packages available in different programming language platforms (R, Python, Julia, MATLAB, and C++), with the number of methods they support. Packages supporting more than five methods are indicated in pink. Packages updated since 2018 are indicated with asterisks. + + + diff --git a/dev/docs/html/_sources/3_sensitivity_analysis_the_basics.rst b/dev/docs/html/_sources/3_sensitivity_analysis_the_basics.rst new file mode 100644 index 0000000..8fee574 --- /dev/null +++ b/dev/docs/html/_sources/3_sensitivity_analysis_the_basics.rst @@ -0,0 +1,30 @@ +.. _3_sensitivity_analysis_the_basics: + +******************************** +Sensitivity Analysis: The Basics +******************************** + +.. include:: 3.1_global_versus_local_sensitivity.rst + +.. include:: 3.2_why_perform_sensitivity_analysis.rst + +.. include:: 3.3_design_of_experiments.rst + +.. include:: 3.4_sensitivity_analysis_methods.rst + +.. include:: 3.5_how_to_choose_a_sensitivity_analysis_method_model_traits_and_dimensionality.rst + +.. include:: 3.6_software_toolkits.rst + +.. note:: + The following articles are suggested as fundamental reading for the information presented in this section: + + * Wagener, T., Pianosi, F., 2019. What has Global Sensitivity Analysis ever done for us? A systematic review to support scientific advancement and to inform policy-making in earth system modelling. *Earth-Science Reviews* 194, 1–18. https://doi.org/10.1016/j.earscirev.2019.04.006 + * Pianosi, F., Beven, K., Freer, J., Hall, J.W., Rougier, J., Stephenson, D.B., Wagener, T., 2016. Sensitivity analysis of environmental models: A systematic review with practical workflow. *Environmental Modelling & Software* 79, 214–232. https://doi.org/10.1016/j.envsoft.2016.02.008 + + The following articles can be used as supplemental reading: + + * Saltelli, A., Ratto, M., Andres, T., Campolongo, F., Cariboni, J., Gatelli, D., Saisana, M., Tarantola, S., 2008. Global Sensitivity Analysis: The Primer, 1st ed. Wiley-Interscience, Chichester, England; Hoboken, NJ. + * Montgomery, D.C., 2017. Design and analysis of experiments. John Wiley & Sons. + * Iooss, B., Lemaître, P., 2015. A Review on Global Sensitivity Analysis Methods, in: Dellino, G., Meloni, C. (Eds.), Uncertainty Management in Simulation-Optimization of Complex Systems: Algorithms and Applications, Operations Research/Computer Science Interfaces Series. Springer US, Boston, MA, pp. 101–122. https://doi.org/10.1007/978-1-4899-7547-8_5 + diff --git a/dev/docs/html/_sources/4.1_understanding_errors_what_is_controlling_model_performance.rst b/dev/docs/html/_sources/4.1_understanding_errors_what_is_controlling_model_performance.rst new file mode 100644 index 0000000..c853642 --- /dev/null +++ b/dev/docs/html/_sources/4.1_understanding_errors_what_is_controlling_model_performance.rst @@ -0,0 +1,25 @@ +.. _4_1_understanding_errors: + +Understanding Errors: What Is Controlling Model Performance? +############################################################ + +Sensitivity analysis is a diagnostic tool for reconciling model outputs with observed data. It is helpful for clarifying how and under what conditions modeling choices (structure, parameterization, data inputs, etc.) propagate through model components and manifest in their effects on model outputs. This exploration is performed through carefully designed sampling of multiple combinations of input parameters and subsequent evaluation of the model structures that emerge as controlling factors. Model structure and parameterization are two of the most commonly explored aspects of models when evaluating their performance relative to available observations :cite:`beven1993prophecy`. Addressing these issues plays an important role in establishing credibility in model predictions, particularly in the positivist natural sciences literature. Traditional model evaluations compare the model with observed data, and then rely on expert judgements of its acceptability based on the closeness between simulation and observation with one or a small number of selected metrics. This approach can be myopic, as it is often impossible to use one metric to attribute a certain error and to link that with different parts of the model and its parameters :cite:`gupta2008reconciling`. 
This means that, even when the error or fitting measure between the model estimates and observations is very small, it is not guaranteed that all the components in the model accurately represent the conceptual reality that the model is abstracting: propagated errors in different parts of the model might cancel each other out, or multiple parameterized implementations of the model can yield similar performance (i.e., equifinality :cite:`beven1993prophecy`). + +The inherent complexity of a system hinders accepting or rejecting a model based on one performance measure, and different types of measures can aid in evaluating the various components of a model as essentially a multiobjective problem :cite:`gupta2012towards,beven2018hypothesis`. In addition, natural systems mimicked by the models contain various interacting components that might act differently across spatial and temporal domains :cite:`herman2013time,pianosi2016sensitivity`. This heterogeneity is lost when a single performance measure is used, as a high-dimensional and interactive system becomes aggregated through the averaging of spatial or temporal output errors :cite:`beven2002towards`. Therefore, diagnostic error analyses should consider multiple error signatures across different scales and states of concern when seeking to understand how model performance relates to observed data (:numref:`Figure_4_1`). Diverse error signatures can be used to measure the consistency of underlying processes and behaviors of the model and to evaluate the dynamics of model controls under changing temporal and spatial conditions :cite:`beven2001equifinality`. Within this framework, even minimal information extracted from the data can be beneficial as it helps us unearth structural inadequacies in the model. In this context, proper selection of measures of model performance and the number of measures could play consequential roles in our understanding of the model and its predictions :cite:`mcphail2018robustness`. + +As discussed earlier, instead of the traditional focus on deterministic prediction that results in a single error measure, many plausible states and spaces could be searched for making different inferences and quantifying uncertainties. This process also requires estimates of prior probability distributions of all the important parameters and quantification of model behavior across the input space. One strategy to reduce the search space is to filter out model alternatives that are not consistent with observations and known system behaviors. Those implausible parts of the search space can be referred to as non-physical or non-behavioral alternatives :cite:`edwards2011precalibrating, beven2014glue`. This step is conducted before the Bayesian calibration exercise (see :numref:`A1_uncertainty_quantification`). + +A comprehensive model diagnostic workflow typically entails the components demonstrated in :numref:`Figure_4_1`. The workflow begins with the selection of model input parameters and their plausible ranges. After the parameter selection, we need to specify the design of experiment (:numref:`design_of_experiments`) and the sensitivity analysis method (:numref:`sensitivity_analysis_methods`) to be used. As previously discussed, these methods require different numbers of model simulations, and each method provides different insights into the direct effects and interactions of the uncertain factors. 
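+
+A quick, back-of-the-envelope comparison of these computational costs can be scripted before committing to a design. The sketch below uses commonly cited cost formulas for a Morris design with :math:`r` trajectories and a Saltelli/Sobol design with base sample :math:`n`; the counts are indicative only and depend on the specific implementation.
+
+.. code-block:: python
+
+    # Hedged sketch: indicative numbers of model evaluations for common designs,
+    # given d uncertain factors. Formulas are approximate, not implementation-exact.
+    d = 10      # number of uncertain factors (example value)
+    r = 10      # Morris trajectories
+    n = 1000    # Sobol base sample size (check index convergence before trusting results)
+
+    runs_morris = r * (d + 1)             # one-at-a-time steps along r trajectories
+    runs_sobol_first_total = n * (d + 2)  # first- and total-order Sobol indices
+    runs_sobol_second = n * (2 * d + 2)   # adds second-order Sobol indices
+
+    print(runs_morris, runs_sobol_first_total, runs_sobol_second)
+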
In addition, the simulation time of the model and the available computational resources are two of the primary considerations that influence these decisions. After identifying the appropriate methods, we generate a matrix of input parameters, where each set of input parameters will be used to conduct a model simulation. The model can include one or more output variables that fluctuate in time and space. The next step is to analyze model performance by comparing model outputs with observations. As discussed earlier, the positivist model evaluation paradigm focuses on a single model performance metric (error), leading to a loss of information about model parameters and the suitability of the model's structure. However, a thorough investigation of the temporal and spatial signatures of model outputs using various performance metrics or time- and space-varying sensitivity analyses can shed more light on the fitness of each parameter set and the model’s internal structure. This analysis provides diagnostic feedback on the importance and range of model parameters and can guide further improvement of the model algorithm. + +.. _Figure_4_1: +.. figure:: _static/figure4_1_diagnostic_workflow.png + :alt: Figure 4.1 + :width: 500px + :figclass: margin-caption + :align: center + + Diagnostic evaluation of model fidelity using sensitivity analysis methods. + +.. note:: + + Put this into practice! Click the following badge to try out an interactive tutorial on implementing a time-varying sensitivity analysis of HYMOD model parameters: `HYMOD Jupyter Notebook `_ diff --git a/dev/docs/html/_sources/4.2_consequential_dynamics_what_is_controlling_model_behaviors_of_interest.rst b/dev/docs/html/_sources/4.2_consequential_dynamics_what_is_controlling_model_behaviors_of_interest.rst new file mode 100644 index 0000000..532651e --- /dev/null +++ b/dev/docs/html/_sources/4.2_consequential_dynamics_what_is_controlling_model_behaviors_of_interest.rst @@ -0,0 +1,48 @@ +Consequential Dynamics: What is Controlling Model Behaviors of Interest? +######################################################################## + +Consequential changes in dynamic systems can take many forms, but most dynamic behavior can be categorized into a few basic patterns. Feedback structures inherent in a system, be they positive or negative, generate these patterns, which, for the simplest of systems, can be grouped into three categories: exponential growth, goal seeking, and oscillation (:numref:`Figure_4_2`). A positive (or self-reinforcing) feedback gives rise to exponential growth, a negative (or self-correcting) feedback gives rise to a goal-seeking mode, and negative feedbacks with time delays give rise to oscillatory behavior. Nonlinear interactions between the system’s feedback structures can give rise to more complex dynamic behavior modes, examples of which are also shown in :numref:`Figure_4_2`, adapted from :cite:t:`sterman2001system`. + +.. _Figure_4_2: +.. figure:: _static/figure4_2_behavior_modes.png + :alt: Figure 4.2 + :width: 700px + :figclass: margin-caption + :align: center + + Common modes of behavior in dynamic systems, occurring based on the presence of positive and negative feedback relationships, and linear and non-linear interactions. Adapted from :cite:t:`sterman2001system`. + +The nature of feedback processes in a dynamic system shapes its fundamental behavior: positive feedbacks generate their own growth, negative feedbacks self-limit, seeking balance and equilibrium. 
In this manner, feedback processes give rise to different regimes, multiples of which could be present in each mode of behavior. Consider a population of mammals growing exponentially until it reaches the carrying capacity of its environment (referred to as S-shaped growth). When the population is exponentially growing, the regime is dominated by positive feedback relationships that reinforce its growth. As the population approaches its carrying capacity limit, negative feedback structures begin to dominate, counteracting the growth and establishing a stable equilibrium. Shifts between regimes can be thought of as tipping points, mathematically defined as unstable equilibria, where the presence of positive feedbacks amplifies disturbances and moves the system to a new equilibrium point. In the case of stable equilibria, the presence of negative feedbacks dampens any small disturbance and maintains the system at a stable state. As different feedback relationships govern each regime, different factors (those making up each feedback mechanism) are activated and shape the states the system is found in, as well as define the points of equilibria. + +For simple stylized models with a small number of states, system dynamics analysis can analytically derive these equilibria, the conditions for their stability and the factors determining them. This becomes significantly more challenging, however, for models that attempt to more closely resemble real complex systems. We argue this is the case for several reasons. First, besides generally exhibiting complex nonlinear dynamics, real-world systems are also made up of larger numbers of interacting elements, which often makes the analytic derivation of system characteristics intractable :cite:p:`anderies2019knowledge,muneepeerakul2020emergence`. Second, human-natural systems temporally evolve and transform when human state-aware action is present. Consider, for instance, humans recreationally hunting the aforementioned population of mammals. Humans act based on the mammal population levels by enforcing hunting quotas, establishing protected territories, or eliminating other predators. The mammal population reacts in response, giving rise to ever-changing state-action-consequence feedbacks, the path dependencies of which become difficult to diagnose and understand (e.g., :cite:p:`hadjimichael2020navigating`). Trying to simulate the combination of these two challenges (large numbers of state-aware agents interacting with a natural resource and with each other) produces intractable models that require advanced heuristics to analyze their properties and establish useful inferences. + +Sensitivity analysis paired with exploratory modeling methods offers a promising set of tools to address these challenges. We present a simple demonstrative application based on :cite:t:`quinn2017direct`. This stylized example was first developed by :cite:t:`carpenter_management_1999` and represents a town that must balance its agricultural and industrial productivity with the pollution it creates in a downstream lake. Increased productivity allows for increased profits, which the town aims to maximize, but it also produces more pollution for the lake. Too much phosphorus pollution can cause irreversible eutrophication, a process known as “tipping” the lake. The model of phosphorus in the lake :math:`X_t` at time :math:`t` is governed by: + +.. math:: + + X_{t+1}= X_{t}+a_{t}+\frac{X_{t}^q} {1+X_{t}^q}-bX_t+\varepsilon + +where :math:`a_t \in [0,0.1]` is the town’s pollution release at each timestep, :math:`b` is the natural decay rate of phosphorus in the lake, :math:`q` defines the lake’s recycling rate (primarily through sediments), and :math:`\varepsilon` represents uncontrollable natural inflows of pollution modeled as a log-normal distribution with a given mean, :math:`\mu`, and standard deviation :math:`\sigma`. + +Panels (a-c) in :numref:`Figure_4_3` plot the fluxes of phosphorus into the lake versus the mass accumulation of phosphorus in the lake. The red line corresponds to the phosphorus sinks in the lake (natural decay), given by :math:`bX_t`. The grey shaded area represents the lake’s phosphorus recycling flux, given by :math:`\frac{X_{t}^q} {1+X_{t}^q}`. The points of intersection indicate the system’s equilibria, two of which are stable, and one is unstable (also known as the tipping point). The stable equilibrium in the bottom left of the figure reflects an oligotrophic lake, whereas the stable equilibrium in the top right represents a eutrophic lake. With increasing phosphorus values, the tipping point can be crossed, and the lake will experience irreversible eutrophication, as the recycling rate would exceed the removal rate even if the town’s pollution became zero. In the absence of anthropogenic and natural inflows of pollution in the lake (:math:`a_t` and :math:`\varepsilon` respectively), the area between the bottom-left black point and the white point in the middle can be considered the safe operating space, before emission levels cross the tipping point. + +.. _Figure_4_3: +.. figure:: _static/figure4_3_lake_problem_fluxes.png + :alt: Figure 4.3 + :width: 700px + :figclass: margin-caption + :align: center + + Fluxes of phosphorus with regards to mass of phosphorus in the lake and sensitivity analysis results, assuming :math:`b=0.42` and :math:`q=2`. (a) Fluxes of phosphorus assuming no emissions policy and no natural inflows. (b-c) Fluxes of phosphorus when applying two different emissions policies. The "Best economic policy" and the "Most reliable policy" have been identified by :cite:t:`quinn2017direct` and can be found at :cite:t:`quinn_julianneqlake_problem_dps_2017`. (d) Results of a sensitivity analysis on the parameters of the model most consequential to the reliability of the "Most reliable policy". The code to replicate the sensitivity analysis can be found at :cite:t:`hadka_project-platypusrhodium_2017`. Panels (a-c) are used courtesy of Julianne Quinn, University of Virginia. + +The town has identified two potential policies that can be used to manage this lake, one that maximizes its economic profits (“best economic policy”) and one that maximizes the time below the tipping point (“most reliable policy”). Panels (b-c) in :numref:`Figure_4_3` add the emissions from these policies to the recycling flux and show how the equilibrium points shift as a result. In both cases, the stable oligotrophic equilibrium increases and the tipping point decreases, narrowing the safe operating space :cite:`anderies2019knowledge, carpenter_allowing_2015`. The best economic policy results in a much narrower space of action, with the tipping point very close to the oligotrophic equilibrium. The performance of both policies depends significantly on the system parameters. 
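+
+One way to see this dependence directly is to simulate the phosphorus dynamics above under alternative parameter values. The following is a minimal, hedged sketch of the state equation only; the emission sequence and the natural-inflow settings are illustrative placeholders, not the calibrated values used in the cited studies.
+
+.. code-block:: python
+
+    # Minimal sketch of the lake dynamics X_{t+1} = X_t + a_t + X_t^q/(1+X_t^q)
+    # - b*X_t + eps, with placeholder emission and inflow settings.
+    import numpy as np
+
+    def simulate_lake(a, b=0.42, q=2.0, mu=0.03, sigma=0.3, X0=0.0, seed=1):
+        rng = np.random.default_rng(seed)
+        # log-normal natural inflows with (approximate) real-space mean mu
+        eps = rng.lognormal(mean=np.log(mu) - 0.5 * sigma**2, sigma=sigma, size=len(a))
+        X = np.zeros(len(a) + 1)
+        X[0] = X0
+        for t in range(len(a)):
+            X[t + 1] = X[t] + a[t] + X[t]**q / (1 + X[t]**q) - b * X[t] + eps[t]
+        return X
+
+    # Constant low release over 100 years; vary b and q to see the regime shift.
+    trajectory = simulate_lake(a=np.full(100, 0.02))
+    print(trajectory[-1])
+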
For example, a higher value of :math:`b`, the natural decay rate, would shift the red line upward, moving the equilibrium points and widening the safe operating space. Inversely, a higher value of :math:`q`, the lake’s recycling rate, would shift the recycling line upward, moving the tipping point lower and decreasing the safe operating space. The assumptions under which these policies were identified are therefore critical to their performance and any potential uncertainty in the parameter values could be detrimental to the system’s objectives being met. + +Sensitivity analysis can be used to clarify the role these parameters play in policy performance. :numref:`Figure_4_3` (d) shows the results of a Sobol sensitivity analysis on the reliability of the “most reliable” policy in a radial convergence diagram. The significance of each parameter is indicated by the size of the circles corresponding to it. The size of the interior dark circle indicates the parameter’s first-order effects and the size of the exterior circle indicates the parameter’s total-order effects. The thickness of the lines between two parameters indicates the extent of their interaction (second-order effects). In this case, parameters :math:`b` and :math:`q` appear to have the most significant influence on the system, followed by the mean, :math:`\mu`, of the natural inflows. All these parameters function in a manner that shifts the location of the three equilibria, and therefore policies identified while ignoring this parametric uncertainty might fail to meet their intended goals. + +It is worth mentioning that current sensitivity analysis methods are somewhat challenged in addressing several system dynamics analysis questions. The fundamental reason is that sensitivity analysis methods and tools have been developed to gauge numerical sensitivity of model output to changes in factor values. This is natural, as most simulation studies (e.g., all aforementioned examples) have been traditionally concerned with this type of sensitivity. In system dynamics modeling, however, a more important and pertinent concern is changes between regimes or between behavior modes (also known as bifurcations) as a result of changes in model factors :cite:p:`hekimouglu2016sensitivity, sterman2001system`. This poses two new challenges. First, identifying a change in regime depends on several characteristics besides a change in output value, like the rate and direction of change. Second, behavior mode changes are qualitative and discontinuous, as equilibria change in stability but also move in and out of existence. + +Despite these challenges, recent advanced sensitivity analysis methods can help illuminate which factors in a system are most important in shaping boundary conditions (tipping points) between different regimes and determining changes in behavior modes. Reviewing such methods is outside the scope of this text, but the reader is directed to the examples of :cite:t:`eker2018practice` and :cite:t:`hadjimichael2020navigating`, who apply parameterized perturbation on the functional relationships of a system to study the effects of model structural uncertainty on model outputs and bifurcations, and :cite:t:`hekimouglu2016sensitivity` and :cite:t:`steinmann2020behavior` who, following wide sampling of uncertain inputs, cluster the resulting time series into modes of behavior and identify the most important factors for each. + +.. note:: + + Put this into practice! Click the following badge to try out an interactive tutorial on performing a sensitivity analysis to discover consequential dynamics: `Factor Discovery Jupyter Notebook `_ diff --git a/dev/docs/html/_sources/4.3_consequential_scenarios_what_is_controlling_consequential_outcomes.rst b/dev/docs/html/_sources/4.3_consequential_scenarios_what_is_controlling_consequential_outcomes.rst new file mode 100644 index 0000000..51736e3 --- /dev/null +++ b/dev/docs/html/_sources/4.3_consequential_scenarios_what_is_controlling_consequential_outcomes.rst @@ -0,0 +1,49 @@ +.. _consequential_scenarios: + +Consequential Scenarios: What is Controlling Consequential Outcomes? +#################################################################### + +As overviewed in :numref:`perspectives`, most models are abstractions of systems in the real world. When sufficient confidence has been established in a model, it can then act as a surrogate for the actual system, in that the consequences of potential stressors, proposed actions or other changes can be evaluated by computer model simulations :cite:`bankes_exploratory_1993`. A model simulation then represents a computational experiment, which can be used to assess how the modeled system would behave should the various changes come to be. Steven Bankes coined the term exploratory modeling to describe the use of large sets of such computational experiments to investigate their implications for the system. :numref:`Figure_4_4` presents a typical workflow of an exploratory modeling application. Exploratory modeling approaches typically use sampling designs to generate large ensembles of states that represent combinations of changes happening together, spanning the entire range of potential values a factor might take (indicated in :numref:`Figure_4_4` by numbers 2-5). This perspective on modeling is particularly relevant to studies making long-term projections into the future. + +.. _Figure_4_4: +.. figure:: _static/figure4_4_exploratory_workflow.png + :alt: Figure 4_4 + :width: 700px + :figclass: margin-caption + :align: center + + A typical exploratory modeling workflow. + + +In the long-term policy analysis literature, exploratory modeling has prominently placed itself as an alternative to traditional narrative scenario or assumptions-based planning approaches, in what can be summarized as the following two-pronged critique :cite:`bankes_exploratory_1993, bankes2001computer, lempert2003shaping`. The most prevalent criticism is that the future and how it might evolve is both highly complex and deeply uncertain. Despite its benefits for interpretation and intuitive appeal, a small number of scenarios invariably misses many other potential futures that were not selected as sufficiently representative. This is especially the case for aggregate, narrative scenarios that describe simultaneous changes in multiple sectors together (e.g., "increased energy demand, combined with high agricultural land use and large economic growth"), such as the emission scenarios produced by the Intergovernmental Panel on Climate Change :cite:`lamontagne2018large`. The bias introduced by this reduced set of potential changes can skew inferences drawn from the model, particularly when the original narrative scenarios are focused on a single or narrow set of measures of system behavior. 
+ +The second main criticism of traditional narrative scenario-based planning methods is that they provide no systematic way to distinguish which of the constituent factors lead to the undesirable consequences produced by a scenario. Narrative scenarios (e.g., the scenario matrix framework of RCPs-SSPs-SPAs; :cite:`o2014new`) encompass multiple changes happening together, selected to span the range of potential changes, but are not typically generated in a systematic factorial manner that considers the multiple ways the factors can be combined. This has two critical limitations. It obfuscates the role each component factor plays in the system, both in isolation and in combination with others (e.g., "is it the increased energy demand or the high agricultural land use that causes unbearable water stress?"). It also makes it nearly impossible to delineate how much change in a factor is critical. Consider, for example, narrative scenario A with a 5% increase in energy demand, and scenario B with a 30% increase in energy demand, which would have dire consequences. At which point between 5% and 30% do the dire consequences actually begin to occur? Such questions cannot be answered without a wide exploration of the space of potential changes. It should be noted that for some levels of model complexity and computational demands (e.g., global-scale models) there is little feasible recourse beyond the use of narrative scenarios. + +Exploratory modeling is typically paired with scenario discovery methods (indicated by number 9 in :numref:`Figure_4_4`) that identify which of the generated scenarios (also known as states of the world) indeed have consequences of interest for stakeholders and policy makers, in an approach referred to as ensemble-based scenario discovery :cite:`bryant2010thinking, groves2007new, lempert2006general`. This approach therefore flips the planning analysis from one that attempts to predict future system conditions to one that attempts to discover the (un)desirable future conditions. Ensemble-based scenario discovery can thus inform what modeling choices yield the most consequential behavioral changes or outcomes, especially when considering deeply uncertain, scenario-informed projections :cite:`walker2013adapt,moallemi2020exploratory`. The relative likelihoods and relevance of the discovered scenarios can be subsequently evaluated by the practitioners a posteriori, within a richer context of knowing the wider set of potential consequences :cite:`dessai2009climate`. This can include changing how an analysis is framed (number 10 in :numref:`Figure_4_4`). For instance, one could initially focus on ensemble modeling of vulnerability using a single uncertain factor that is assumed to be well characterized by historical observations (e.g., streamflow; this step is represented by numbers 2-3 in :numref:`Figure_4_4`). The analysis can then shift to include projections of more factors treated as deeply uncertain (e.g., urbanization, population demands, temperature, and snow-melt) to yield a far wider space of challenging projected futures. UC experiments contrasting these two framings can be highly valuable for tracing how vulnerability inferences change as the modeled space of futures expands from the historical baseline :cite:`quinn2017direct`. + +An important nuance to be clarified here is that the focus or purpose of a modeling exercise plays a major role in whether a given factor of interest is considered well-characterized or deeply uncertain. 
Take the example context of characterizing temperature or streamflow extremes, where for each state variable of interest for a given location of focus there is a century of historical observations. Clearly, the observation technologies will have evolved over time uniquely for temperature and streamflow measurements and they likely lack replicate experiments (data uncertainty). A century of record will be insufficient to capture very high impact and rare extreme events (i.e., increasingly poor structural/parametric inference for the distributions of specific extreme single or compound events). The mechanistic processes as well as their evolving variability will be interdependent but uniquely different for each of these state variables. A large body of statistical literature exists focusing on the topics of synthetic weather :cite:p:`wilks1999weather,herrera2017review` or streamflow :cite:p:`lamontagne2018generating,medda2019comparison` generation that provides a rich suite of approaches for developing history-informed, well-characterized stochastic process models to better estimate rare individual or compound extremes. These history-focused approaches can be viewed as providing well-characterized quantifications of streamflow or temperature distributions; however, they do not capture how coupled natural-human processes can fundamentally change their dynamics when transitioning to projections of longer-term futures (e.g., streamflow and temperature in 2055). Consequently, changing the focus of the modeling to making long-term projections of future streamflow or temperature now makes these processes deeply uncertain. + +Scenario discovery methods (number 9 in :numref:`Figure_4_4`) can be qualitative or quantitative and they generally attempt to distinguish futures in which a system or proposed policies to manage the system meet or miss their goals :cite:`groves2007new`. The emphasis placed by exploratory modeling on model outputs that have decision-relevant consequences represents a shift toward a broader class of metrics that are reflective of the stakeholders’ concerns, agency and preferences (also discussed in :numref:`perspectives`). As a result, sensitivity analysis and scenario discovery methods in this context are applied to performance metrics that go beyond model error and focus instead on broader measures such as the resilience of a sector, the reliability of a process, or the vulnerability of a population in the face of uncertainty. In exploratory modeling literature, this metric is most typically—but not always—a measure of robustness (number 8 in :numref:`Figure_4_4`). Robustness is a property of a system or a design choice capturing its insensitivity to uncertainty and can be measured via a variety of means, most recently reviewed by :cite:t:`herman2015should` and :cite:t:`mcphail2018robustness`. + +Scenario discovery is typically performed through the use of algorithms applied on large databases of model runs, generated through exploratory modeling, with each model run representing the performance of the system in one potential state of the world. The algorithms seek to identify the combinations of factor values (e.g., future conditions) that best distinguish the cases in which the system does or does not meet its objectives. The most widely known classification algorithms are the Patient Rule Induction Method (PRIM; :cite:p:`friedman1999bump`) and Classification and Regression Trees (CART; :cite:p:`breiman1984classification`). 
These factor mapping algorithms create orthogonal boundaries (multi-dimensional hypercubes) between states of the world that are successful or unsuccessful in meeting the system’s goals :cite:p:`dalal2008low`. The algorithms attempt to strike a balance between simplicity of classification (and as a result, interpretability) and accuracy :cite:p:`groves2007new, bryant2010thinking, lempert2019robust`. + +Even though these approaches have been shown to yield interpretable and relevant scenarios :cite:p:`kwakkel2019supporting`, several authors have pointed out the limitations of these methods with regards to their division of the space into orthogonal behavioral and non-behavioral regions :cite:p:`trindade2019deeply`. Due to their reliance on boundaries orthogonal to the uncertainty axes, PRIM and CART cannot capture interactions between the various uncertain factors considered, which can often be significant :cite:p:`quinn2018exploring`. More advanced methods have been proposed to address this drawback, with logistic regression being perhaps the most prominent :cite:p:`gold2019identifying, lamontagne2019robust, quinn2018exploring`. Logistic regression can produce boundaries that are not necessarily orthogonal to each uncertainty axis, nor necessarily linear, if interactive terms between two parameters are used to build the regression model. It also describes the probability that a state of the world belongs to the scenarios that lead to failure. This feature allows users to define regions of success based on a gradient of estimated probability of success in those worlds, unlike PRIM, which only classifies states of the world into two regions :cite:p:`hadjimichael_defining_2020, quinn2018exploring`. + +Another more advanced factor mapping method is boosted trees :cite:p:`drucker1996boosting, freund1999short`. Boosted trees can avoid two limitations inherent in the application of logistic regression: i) to build a nonlinear classification model the interactive term between two uncertainties needs to be pre-specified and cannot be discovered (e.g., we need to know a priori whether factor :math:`x_1` interacts with :math:`x_2` in a relationship that looks like :math:`x_1`·:math:`x_2` or :math:`x_1^{x_2}`); and ii) the subspaces defined are always convex. The application of logistic regression is also limited in the presence of threshold-based rules with discrete actions in a modeled system (e.g., “if network capacity is low, build new infrastructure”), which results in failure regions that are nonlinear and non-convex :cite:p:`trindade2019deeply`. Boosting works by creating an ensemble of classifiers and forcing some of them to focus on the hard-to-learn parts of the problem, and others to focus on the easy-to-learn parts. Boosting applied to CART trees can avoid the aforementioned challenges faced by other scenario discovery methods, while resisting overfitting :cite:p:`murphy2012machine` and ensuring that the identified success and failure regions remain easy to interpret. + +Below we provide an example application of two scenario discovery methods, PRIM and logistic regression, using the lake problem introduced in the previous section. From the sensitivity analysis results presented in :numref:`Figure_4_3` (d), we can already infer that parameters :math:`b` and :math:`q` have important effects on model outputs (i.e., we have performed factor prioritization). 
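+
+As a rough sketch of the logistic-regression variant of factor mapping described above, the snippet below fits a classifier with an interaction term so the success/failure boundary need not be axis-orthogonal; the sampled parameter ranges and the reliability criterion are placeholders standing in for the lake-problem ensemble, not the actual study data.
+
+.. code-block:: python
+
+    # Hedged sketch of factor mapping with logistic regression, including
+    # second-degree (interaction) terms for a non-orthogonal boundary.
+    import numpy as np
+    from sklearn.linear_model import LogisticRegression
+    from sklearn.preprocessing import PolynomialFeatures
+
+    rng = np.random.default_rng(0)
+    b = rng.uniform(0.1, 0.45, size=3000)     # placeholder sampled decay rates
+    q = rng.uniform(2.0, 4.5, size=3000)      # placeholder sampled recycling rates
+    success = (b + 0.05 * q) > 0.35           # placeholder reliability criterion
+
+    X = PolynomialFeatures(degree=2, include_bias=False).fit_transform(np.column_stack([b, q]))
+    clf = LogisticRegression(max_iter=1000).fit(X, success)
+    print(clf.predict_proba(X[:5])[:, 1])     # estimated probability of success
+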
Scenario discovery (i.e., factor mapping) complements this analysis by further identifying the specific values of :math:`b` and :math:`q` that can lead to consequential and undesirable outcomes. For the purposes of demonstration, we can assume the undesirable outcome in this case is defined as the management policy failing to achieve 90% reliability in a state of the world. + +.. _Figure_4_5: +.. figure:: _static/figure14lake_problem_SD.png + :alt: Figure 4.5 + :width: 700px + :figclass: margin-caption + :align: center + + Scenario discovery for the lake problem, using (a) PRIM and (b) logistic regression. + +:numref:`Figure_4_5` shows the results of scenario discovery, performed through (a) PRIM and (b) logistic regression. Each point in the two panels indicates a potential state of the world, generated through Latin Hypercube Sampling. Each point is colored by whether the policy meets the above performance criterion, with blue indicating success and red indicating failure. PRIM identifies several orthogonal areas of interest, one of which is shown in panel (a). As discussed above, this necessary orthogonality limits how PRIM identifies areas of success (the area within the box). As factors :math:`b` and :math:`q` interact in this system, the transition boundary between the regions of success and failure is not orthogonal to any of the axes. As a result, a large number of points in the bottom right and the top left of the figure are left outside of the identified region. Logistic regression can overcome this limitation by identifying a diagonal boundary between the two regions, seen in panel (b). This method also produces a gradient of estimated probability of success across these regions. + +.. note:: + + Put this into practice! Click the following link to try out an interactive tutorial on performing factor mapping using logistic regression: `Logistic Regression Jupyter Notebook `_ diff --git a/dev/docs/html/_sources/4_sensitivity_analysis_diagnostic_and_exploratory_modeling.rst b/dev/docs/html/_sources/4_sensitivity_analysis_diagnostic_and_exploratory_modeling.rst new file mode 100644 index 0000000..1737160 --- /dev/null +++ b/dev/docs/html/_sources/4_sensitivity_analysis_diagnostic_and_exploratory_modeling.rst @@ -0,0 +1,24 @@ +.. _4_sensitivity_analysis: + +******************************************************* +Sensitivity Analysis: Diagnostic & Exploratory Modeling +******************************************************* + +.. include:: 4.1_understanding_errors_what_is_controlling_model_performance.rst + +.. include:: 4.2_consequential_dynamics_what_is_controlling_model_behaviors_of_interest.rst + +.. include:: 4.3_consequential_scenarios_what_is_controlling_consequential_outcomes.rst + +.. note:: + The following articles are suggested as fundamental reading for the information presented in this section: + + * Gupta, H.V., Wagener, T., Liu, Y., 2008. Reconciling theory with observations: elements of a diagnostic approach to model evaluation. *Hydrological Processes: An International Journal* 22, 3802–3813. + * Bankes, S., 1993. Exploratory Modeling for Policy Analysis. *Operations Research* 41, 435–449. https://doi.org/10.1287/opre.41.3.435 + * Groves, D.G., Lempert, R.J., 2007. A new analytic method for finding policy-relevant scenarios. *Global Environmental Change* 17, 73–85. https://doi.org/10.1016/j.gloenvcha.2006.11.006 + + + The following articles can be used as supplemental reading: + + * Marchau, V.A.W.J., Walker, W.E., Bloemen, P.J.T.M., Popper, S.W. (Eds.), 2019. 
Decision Making under Deep Uncertainty: From Theory to Practice. Springer International Publishing. https://doi.org/10.1007/978-3-030-05252-2 + * Sterman, J.D., 1994. Learning in and about complex systems. System Dynamics Review 10, 291–330. https://doi.org/10.1002/sdr.4260100214 diff --git a/dev/docs/html/_sources/5_conclusion.rst b/dev/docs/html/_sources/5_conclusion.rst new file mode 100644 index 0000000..e0f058b --- /dev/null +++ b/dev/docs/html/_sources/5_conclusion.rst @@ -0,0 +1,9 @@ +.. _5_conclusion: + +********** +Conclusion +********** + +As noted in the Introduction (:numref:`introduction`), the computational and conceptual challenges of the multi-model, transdisciplinary workflows that characterize ambitious projects such as IM3 have limited UC and UQ analyses. Moreover, the very nature and purpose of modeling and diagnostic model evaluation can have very diverse philosophical framings depending on the disciplines involved (see :numref:`Figure_1_1` and :numref:`perspectives`). The guidance provided in this text can be used to frame consistent and rigorous experimental designs for better understanding the consequences and insights from our modeling choices when seeking to capture complex human-natural systems. The progression of sections of this text provide a thorough introduction of the concepts and definitions of diagnostic model evaluation, sensitivity analysis and UC. In addition, we comprehensively discuss how specific modeling objectives and applications should guide the selection of appropriate techniques; broadly, these can include model diagnostics, in-depth analysis of the behavior of the abstracted system, and projections under conditions of deep uncertainty. This text also contains a detailed presentation of the main sensitivity analysis methods and a discussion of their features and main limitations. Readers are also provided with an overview of computer tools and platforms that have been developed and could be considered in addressing IM3 scientific questions. The appendices of this text include an overview of UQ methods, a terminology glossary of the key concepts as well as example test cases and scripts to showcase various UC related capabilities. + +Although we distinguish the UC and UQ model diagnostics, the reader should note that we suggest an overall consistent approach to both in this text by emphasizing “exploratory modeling” (see review by :cite:t:`moallemi2020exploratory`). Although data support, model complexity, and computational limits strongly distinguish the feasibility and appropriateness of various UC diagnostic tools (e.g., see :numref:`Figure_3_5`), we overall recommend that modelers view their work through the lens of cycles of learning. Iterative and deliberative exploration of model-based hypotheses and inferences for transdisciplinary teams is non-trivial and ultimately critical for mapping where innovations or insights are most consequential. Overall, we recommend approaching modeling with an openness to the diverse disciplinary perspectives such as those mirrored by the IM3 family of models in a progression from evaluating models relative to observed history to advanced formalized analyses to make inferences on multi-sector, multi-scale vulnerabilities and resilience. 
Exploratory modeling approaches can help fashion experiments with large numbers of alternative hypotheses on the co-evolutionary dynamics of influences, stressors, as well as path-dependent changes in the form and function of coupled human-natural systems :cite:`weaver_improving_2013`. This text guides the reader through the use of sensitivity analysis and uncertainty methods across the diverse perspectives that have shaped modern diagnostic and exploratory modeling. diff --git a/dev/docs/html/_sources/6_glossary.rst b/dev/docs/html/_sources/6_glossary.rst new file mode 100644 index 0000000..758ebbf --- /dev/null +++ b/dev/docs/html/_sources/6_glossary.rst @@ -0,0 +1,59 @@ +.. _glossary: + +======== +Glossary +======== + +**Design of experiment**: Provides a framework for the extraction of all plausible information about the impact of each factor on the output of the numerical model + +**Exploratory modeling**: Use of large ensembles of uncertain conditions to discover decision-relevant combinations of uncertain factors + +**Factor**: Any model component that can affect model outputs: inputs, resolution levels, coupling relationships, model relationships and parameters. In models with acceptable model fidelity these factors may represent elements of the real-world system under study. + +**Factor mapping**: A technique to identify which uncertain model factors lead to certain model behavior + +**Factor prioritization**: A technique to identify the uncertain factors which, when fixed to their true value, would lead to the greatest reduction in output variability + +**Factor screening**: A technique to identify model components that have a negligible effect or make no significant contributions to the variability of the outputs or metrics of interest + +**First-, second-, total-order effects**: First-order effects indicate the percent of model output variance contributed by a factor individually. Second-order effects capture how interactions between a pair of parameter input variables can lead to change in model output. Total-order effects consider all the effects a factor has, individually and in interaction with other factors. + +**Hindcasting**: A type of predictive check that uses the model to estimate output for past events to see how well the output matches the known results. + +**Pre-calibration**: A hybrid uncertainty assessment method that involves identifying a plausible set of parameters using some prespecified screening criterion, such as the distance from the model results to the observations. + +**Prior**: The best assessment of the probability of an event based on existing knowledge before a new experiment is conducted + +**Posterior**: The revised or updated probability of an event after taking into account new information + +**Probabilistic inversion**: Uses additional information, for instance, a probabilistic expert assessment or survey result, to update an existing prior distribution + +**Return level**: A value that is expected to be equaled or exceeded on average once every interval of time (T) (with a probability of 1/T) + +**Return period**: The estimated time interval between events of a similar size or intensity/ + +**Sampling**: The process of selecting model parameters or inputs that characterize the model uncertainty space. 
+ +**Scenario discovery**: Use of large ensembles of uncertain conditions to discover decision-relevant combinations of uncertain factors + +**Sensitivity analysis**: Conducted to understand the factors and processes that most (or least) control a model’s outputs + + *Local sensitivity analysis*: Model evaluation performed by varying uncertain factors around specific reference values + + *Global sensitivity analysis*: Model evaluation performed by varying uncertain factors throughout their entire feasible value space + +**Uncertainty** + +*Deep uncertainty*: Refers to situations where expert opinions consulted on a decision do not know or cannot agree on system boundaries, or the outcomes of interest and their relative importance, or the prior probability Distribution for the various uncertain factors present + +*Epistemic uncertainty*: Systematic uncertainty that comes about due to the lack of knowledge or data to choose the best model + +*Ontological uncertainty*: Uncertainties due to processes, interactions, or futures, that are not contained within current conceptual models + +*Aleatory uncertainty*: Uncertainty due to natural randomness in processes + +*Uncertainty characterization*: Model evaluation under alternative factor hypotheses to explore their implications for model output uncertainty + +*Uncertainty quantification*: Representation of model output uncertainty using probability distributions + +**Variance decomposition**: A technique to partition how much of the variability in a model’s output is due to different explanatory variables. diff --git a/dev/docs/html/_sources/A1.1_UQ_Introduction.rst b/dev/docs/html/_sources/A1.1_UQ_Introduction.rst new file mode 100644 index 0000000..2f8204e --- /dev/null +++ b/dev/docs/html/_sources/A1.1_UQ_Introduction.rst @@ -0,0 +1,21 @@ +Introduction +############ + +As defined in :numref:`introduction`, uncertainty quantification (UQ) refers to the formal focus on the full specification of likelihoods as well as distributional forms necessary to infer the joint probabilistic response across all modeled factors of interest :cite:`cooke1991experts`. This is in contrast to UC (the primary focus of the main document of this book), which is instead aimed at identifying which modeling choices yield the most consequential changes or outcomes and exploring alternative hypotheses related to the form and function of modeled systems :cite:`moallemi2020exploratory, walker2003defining`. + +UQ is important for quantifying the relative merits of hypotheses for at least three main reasons. First, identifying model parameters that are consistent with observations is an important part of model development. Due to several effects, including correlations between parameters, simplified or incomplete model structures (relative to the full real-world dynamics), and uncertainty in the observations, many different combinations of parameter values can be consistent with the model structure and the observations to varying extents. Accounting for this uncertainty is conceptually preferable to selecting a single “best fit” parameter vector, particularly as consistency with historical or present observations does not necessarily guarantee skillful future projections. + +The act of quantification requires specific assumptions about distributional forms and likelihoods, which may be more or less justified depending on prior information about the system or model behavior. 
As a result, UQ is well-suited for studies accounting for or addressing hypotheses related to systems with a relatively large amount of available data and models which are computationally inexpensive, particularly when the emphasis is on prediction. As shown in :numref:`Figure_A1_1`, there is a fundamental tradeoff between the available number of model evaluations (for a fixed computational budget) and the number of parameters treated as uncertain. Sensitivity analyses are therefore part of a typical UQ workflow to identify which factors can be fixed and which ought to be prioritized in the UQ. + +.. _Figure_A1_1: +.. figure:: _static/figureA1_1_UQ_approaches.png + :alt: Figure A1.1 + :width: 700px + :figclass: margin-caption + :align: center + + Overview of selected existing approaches for uncertainty quantification and their appropriateness given the number of uncertain model parameters and the number of available model simulations. Green shading denotes regions suitable for uncertainty quantification and red shading indicates regions more appropriate for uncertainty characterization. + +The choice of a particular UQ method depends on both the desired level of quantification and the ability to navigate the tradeoff between computational expense and the number of uncertain parameters (:numref:`Figure_A1_1`). For example, Markov chain Monte Carlo with a full system model can provide an improved representation of uncertainty compared to the coarser pre-calibration approach :cite:`ruckert_assessing_2017`, but requires many more model evaluations. The use of a surrogate model to approximate the full system model can reduce the number of needed model evaluations by several orders of magnitude, but the uncertainty quantification can only accommodate a limited number of parameters. + +The remainder of this appendix will focus on introducing workflows for particular UQ methods, including a brief discussion of advantages and limitations. \ No newline at end of file diff --git a/dev/docs/html/_sources/A1.2_Parametric_Bootstrap.rst b/dev/docs/html/_sources/A1.2_Parametric_Bootstrap.rst new file mode 100644 index 0000000..2cd2076 --- /dev/null +++ b/dev/docs/html/_sources/A1.2_Parametric_Bootstrap.rst @@ -0,0 +1,17 @@ +Parametric Bootstrap +#################### + +The parametric bootstrap :cite:p:`efron_bootstrap_1986` refers to a process of model recalibration to alternate realizations of the data. The bootstrap was originally developed to estimate standard errors and confidence intervals without ascertaining key assumptions that might not hold given the available data. In a setting where observations can be viewed as independent realizations of an underlying stochastic process, a sufficiently rich dataset can be treated as a population representing the data distribution. New datasets are then generated by resampling from the data with replacement, and the model can be refit to each new dataset using maximum-likelihood estimation. The resulting distribution of estimates can then be viewed as a representation of parametric uncertainty. + +A typical workflow for the parametric bootstrap is shown in :numref:`Figure_A1_2`. After identifying outputs of interest and preparing the data, the parametric model is fit by some procedure such as minimizing root-mean-square-error or maximizing the likelihood. Alternate datasets are constructed by resampling from the population or by generating new samples from the fitted data-generating process. 
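As a minimal sketch of this workflow, the code below fits a simple linear model to synthetic observations, generates alternate datasets from the fitted data-generating process, and refits the model to each one. The data, the linear model, and the Gaussian noise assumption are placeholder choices for illustration, not a recommended setup for any particular application.

.. code:: python

    import numpy as np

    rng = np.random.default_rng(1)

    # Hypothetical observations from an unknown linear process with noise.
    x = np.linspace(0, 10, 50)
    y_obs = 2.0 * x + 1.0 + rng.normal(0, 1.5, x.size)

    # Fit the parametric model (here by least squares) and estimate the
    # residual standard deviation of the fitted data-generating process.
    slope, intercept = np.polyfit(x, y_obs, deg=1)
    residual_sd = np.std(y_obs - (slope * x + intercept), ddof=2)

    # Generate alternate datasets from the fitted process and refit the
    # model to each one.
    n_boot = 1000
    estimates = np.empty((n_boot, 2))
    for i in range(n_boot):
        y_new = slope * x + intercept + rng.normal(0, residual_sd, x.size)
        estimates[i] = np.polyfit(x, y_new, deg=1)

    # The spread of the refitted parameters represents parametric
    # uncertainty, e.g., a 95% interval for the slope.
    print(np.percentile(estimates[:, 0], [2.5, 97.5]))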
It is important at this step that the resampled quantities are independent of one another. For example, in the context of temporally- or spatially-correlated data, such as time series, the raw observations cannot be treated as independent realizations. However, the residuals resulting from fitting the model to the data could be (depending on their structure). For example, if the residuals are treated as independent, they can then be resampled with replacement, and these residuals added to the original model fit to create new realizations. If the residuals are assumed to be the result of an autoregressive process, this process could be fit to the original residual series and new residuals be created using this model :cite:`sriver_characterizing_2018`. The model is then refit to each new realization. + +.. _Figure_A1_2: +.. figure:: _static/figureA1_2_bootstrap_workflow.png + :alt: Figure A1.2 + :width: 700px + :figclass: margin-caption + :align: center + + Workflow for the parametric bootstrap. + +The bootstrap is computationally convenient, particularly as the process of fitting the model to each realization can be easily parallelized. This approach also requires minimal prior assumptions. However, due to the assumption that the available data are representative of the underlying data distribution, the bootstrap can neglect key uncertainties which might influence the results. For example, when using an autoregressive process to generate new residuals, uncertainty in the autocorrelation parameter and innovation variance is neglected, which may bias estimates of, for example, low-probability but high-impact events :cite:`ruckert_effects_2017`. diff --git a/dev/docs/html/_sources/A1.3_Precalibration.rst b/dev/docs/html/_sources/A1.3_Precalibration.rst new file mode 100644 index 0000000..425e029 --- /dev/null +++ b/dev/docs/html/_sources/A1.3_Precalibration.rst @@ -0,0 +1,24 @@ +Pre-Calibration +############### + +Pre-calibration :cite:`beven_future_1992, edwards_precalibrating_2011, boukouvalas_bayesian_2014` involves the identification of a plausible set of parameters using some prespecified screening criterion, such as the distance from the model results to the observations (based on an appropriate metric for the desired matching features, such as root-mean-squared error). A typical workflow is shown in :numref:`Figure_A1_3`. Parameter values are obtained by systematically sampling the input space (see :numref:`design_of_experiments`). After the model is evaluated at the samples, only those passing the distance criterion are retained. This selects a subset of the parameter space as “plausible” based on the screening criterion, though there is no assignment of probabilities within this plausible region. + +.. _Figure_A1_3: +.. figure:: _static/figureA1_3_precal_workflow.png + :alt: Figure A1.3 + :width: 700px + :figclass: margin-caption + :align: center + + Workflow for pre-calibration. + +Pre-calibration can be useful for models which are inexpensive enough that a reasonable +number of samples can be used to represent the parameter space, but which are too expensive to facilitate full uncertainty quantification. High-dimensional parameter spaces, which can be problematic for the uncertainty quantification methods below, may also be explored using pre-calibration. One key prerequisite to using this method is the ability to place a meaningful distance metric on the output space. 
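A rough sketch of this screening step, under stated assumptions, is given below: a Latin hypercube sample of a two-dimensional parameter space is run through a stand-in exponential-decay model, and only the samples whose root-mean-squared error against synthetic observations falls below a prespecified tolerance are retained as plausible. The model, parameter bounds, and tolerance are all illustrative placeholders.

.. code:: python

    import numpy as np
    from scipy.stats import qmc

    rng = np.random.default_rng(7)

    # Synthetic "observations" from a stand-in model y = a * exp(-b * t).
    t = np.linspace(0, 5, 30)
    y_obs = 3.0 * np.exp(-0.7 * t) + rng.normal(0, 0.1, t.size)

    def model(a, b):
        return a * np.exp(-b * t)

    # Systematically sample the two-dimensional parameter space.
    sampler = qmc.LatinHypercube(d=2, seed=7)
    samples = qmc.scale(sampler.random(5000),
                        l_bounds=[0.5, 0.1], u_bounds=[6.0, 2.0])

    # Screening criterion: root-mean-squared error below a chosen tolerance.
    rmse = np.array([np.sqrt(np.mean((model(a, b) - y_obs) ** 2))
                     for a, b in samples])
    plausible = samples[rmse < 0.2]

    print(f"{len(plausible)} of {len(samples)} samples retained as plausible")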
+ +However, pre-calibration results in a very coarse characterization of uncertainty, especially when considering a large number of parameters, as more samples are needed to fully characterize the parameter space. Due to the inability to evaluate the relative probability of regions of the parameter space beyond the binary plausible-and-implausible characterization, pre-calibration can also result in degraded hindcast and projection skills and parameter estimates :cite:`makowski_using_2002, shafii_uncertainty-based_2014, ruckert_assessing_2017`. + +A related method, widely used in hydrological studies, is generalized likelihood uncertainty estimation, or GLUE :cite:`beven_future_1992`. Unlike pre-calibration, the underlying argument for GLUE relies on the concept of equifinality :cite:`beven_equifinality_2001`, which posits that it is impossible to find a uniquely well-performing parameter vector for models of abstract environmental systems :cite:`beven_equifinality_2001, vrugt_embracing_2018`. In other words, there exist multiple parameter vectors which perform equally or similarly well. As with pre-calibration, GLUE uses a goodness-of-fit measure (though this is called a “likelihood” in the GLUE literature, as opposed to a statistical likelihood function :cite:`stedinger_appraisal_2008`) to evaluate samples. After setting a threshold of acceptable performance with respect to that measure, samples are evaluated and classified into “behavioral” or “non-behavioral” according to the threshold. + +.. note:: + + Put this into practice! Click the following badge to try out an interactive tutorial on utilizing Pre-Calibration and GLUE for HYMOD model calibration: `Pre-Calibration Jupyter Notebook `_ diff --git a/dev/docs/html/_sources/A1.4_Markov_Chain_Monte_Carlo.rst b/dev/docs/html/_sources/A1.4_Markov_Chain_Monte_Carlo.rst new file mode 100644 index 0000000..afc3a23 --- /dev/null +++ b/dev/docs/html/_sources/A1.4_Markov_Chain_Monte_Carlo.rst @@ -0,0 +1,23 @@ +Markov Chain Monte Carlo +######################## + +Markov chain Monte Carlo (MCMC) is a “gold standard” approach to full uncertainty quantification. MCMC refers to a category of algorithms which systematically sample from a target distribution (in this case, the posterior distribution) by constructing a Markov chain. A Markov chain is a probabilistic structure consisting of a state space, an initial probability distribution over the states, and a transition distribution between states. If a Markov chain satisfies certain properties :cite:p:`robert_monte_2013, robert_metropolishastings_2015`, the probability of being in each state will eventually converge to a stable, or stationary, distribution, regardless of the initial probabilities. + +MCMC algorithms construct a Markov chain of samples from a parameter space (the combination of model and statistical parameters). This Markov chain is constructed so that the stationary distribution is a target distribution, in this case the (Bayesian) posterior distribution. As a result, after the transient period, the resulting samples can be viewed as a set of dependent samples from the posterior (the dependence is due to the autocorrelation between samples resulting from the Markov chain transitions). Expected values can be computed from these samples (for example, using batch-means estimators :cite:p:`flegal_markov_2008`), or the chain can be sub-sampled or thinned and the resulting samples used as independent Monte Carlo samples due to the reduced or eliminated autocorrelation. 
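To make the sampling idea concrete, the sketch below implements a bare-bones random-walk Metropolis-Hastings sampler for the mean of a toy Gaussian model. The prior, likelihood, and proposal scale are placeholder choices, and a production analysis would normally rely on one of the established MCMC packages discussed below rather than hand-rolled code.

.. code:: python

    import numpy as np
    from scipy import stats

    rng = np.random.default_rng(3)

    # Synthetic data from a toy data-generating process with unknown mean.
    data = rng.normal(loc=1.5, scale=1.0, size=40)

    def log_posterior(mu):
        # Placeholder weakly informative Gaussian prior plus Gaussian
        # likelihood with known residual standard deviation.
        log_prior = stats.norm.logpdf(mu, loc=0.0, scale=10.0)
        log_like = np.sum(stats.norm.logpdf(data, loc=mu, scale=1.0))
        return log_prior + log_like

    n_iter, step = 10_000, 0.5
    chain = np.empty(n_iter)
    chain[0] = 0.0
    for i in range(1, n_iter):
        proposal = chain[i - 1] + rng.normal(0, step)  # random-walk proposal
        log_alpha = log_posterior(proposal) - log_posterior(chain[i - 1])
        # Accept with probability min(1, alpha); otherwise keep the state.
        chain[i] = proposal if np.log(rng.uniform()) < log_alpha else chain[i - 1]

    # Discard a transient ("burn-in") period before summarizing the posterior.
    posterior_samples = chain[2000:]
    print(posterior_samples.mean(), posterior_samples.std())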
+ +A general workflow for MCMC is shown in :numref:`Figure_A1_4`. The first decision is whether to use the full model or a surrogate model (or emulator). Typical surrogates include Gaussian process emulation :cite:p:`currin_bayesian_1991, sacks_design_1989`, polynomial chaos expansions :cite:p:`ghanem_spectral_1991, xiu_wiener--askey_2002`, support vector machines :cite:p:`ciccazzo_svm_2016, pruett_creation_2016`, and neural networks :cite:p:`eason_adaptive_2014, gorissen_sequential_2009`. Surrogate modeling can be faster, but requires a sufficient number of model evaluations for the surrogate to accurately represent the model’s response surface, and this typically limits the number of parameters which can be included in the analysis. + +.. _Figure_A1_4: +.. figure:: _static/figureA1_4_mcmc_workflow.png + :alt: Figure A1.4 + :width: 700px + :figclass: margin-caption + :align: center + + Workflow for Markov chain Monte Carlo. + +After selecting the variables which will be treated as uncertain, the next step is to specify the likelihood based on the selected surrogate model or the structure of the data-model residuals. For example, it may not always be appropriate to treat the residuals as independent and identically distributed (as is commonly done in linear regression). A mis-specification of the residual structure can result in biases and over- or under-confident inferences and projections :cite:p:`brynjarsdottir_learning_2014`. + +After specifying the prior distributions (see :numref:`critical_first_step`), the selected MCMC algorithm should be used to draw samples from the posterior distribution. There are many MCMC algorithms, all of which have advantages and disadvantages for a particular problem. These include the Metropolis-Hastings algorithm :cite:p:`robert_metropolishastings_2015` and Hamiltonian Monte Carlo :cite:p:`betancourt_conceptual_2017, neal_mcmc_2011`. Software packages typically implement one MCMC method, sometimes designed for a particular problem setting or likelihood specification. For example, R’s *adaptMCMC* implements an adaptive Metropolis-Hastings algorithm :cite:p:`vihola_robust_2012`, while *NIMBLE* :cite:p:`valpine_programming_2017, nimble_development_team_nimble_2021` uses a user-customizable Metropolis-Hastings implementation, as well as functionality for Gibbs sampling (which is a special case of Metropolis-Hastings where the prior distribution has a convenient mathematical form). Some recent implementations, such as *Stan* :cite:p:`stan_development_team_stan_2021`, *pyMC3* :cite:p:`salvatier_probabilistic_2016`, and *Turing* :cite:p:`ge_turing_2018` allow different algorithms to be used. + +A main consideration when using MCMC algorithms is testing for convergence to the target distribution. As convergence is guaranteed only for a sufficiently large number of transitions, it is impossible to conclude for certain that a chain has converged for a fixed number of iterations. However, several heuristics have been developed :cite:p:`flegal_markov_2008, gelman_inference_1992` to increase evidence that convergence has occurred. diff --git a/dev/docs/html/_sources/A1.5_Other_methods.rst b/dev/docs/html/_sources/A1.5_Other_methods.rst new file mode 100644 index 0000000..c290efc --- /dev/null +++ b/dev/docs/html/_sources/A1.5_Other_methods.rst @@ -0,0 +1,6 @@ +Other Methods +############# + +Other common methods for UQ exist. 
These include sequential Monte Carlo, otherwise known as particle filtering :cite:p:`del_moral_sequential_2006, doucet_sequential_2000, liu_combined_2001`, where an ensemble of weighted particles is used to represent the target distribution. An advantage of sequential Monte Carlo is that the vast majority of the computation can be parallelized, unlike with standard MCMC. A major weakness is the potential for degeneracy :cite:p:`doucet_sequential_2000`, where many particles have extremely small weights, resulting in the effective use of only a few samples. + +Another method is approximate Bayesian computation (ABC) :cite:p:`cabras_approximate_2015, lintusaari_fundamentals_2017, sunnaker_approximate_2013`. ABC is a likelihood-free approach that compares model output to a set of summary statistics. ABC is therefore well-suited for models and residual structures which do not lend themselves to a computationally tractable likelihood, but the resulting inferences are known to be biased if the set of summary statistics is not sufficient, which can be difficult to know a priori. diff --git a/dev/docs/html/_sources/A1.6_Critical_first_step.rst b/dev/docs/html/_sources/A1.6_Critical_first_step.rst new file mode 100644 index 0000000..2b03200 --- /dev/null +++ b/dev/docs/html/_sources/A1.6_Critical_first_step.rst @@ -0,0 +1,23 @@ +.. _critical_first_step: + +The Critical First Step: How to Choose a Prior Distribution +########################################################### + +Prior distributions play an important role in Bayesian uncertainty quantification, particularly when data are limited relative to the dimension of the model. Bayesian updating can be thought of as an information filter, where each additional datum is added to the information contained in the prior; eventually, the prior makes relatively little impact. In real-world problems, it can be extremely difficult to assess how much data is required for the choice of prior to become less relevant. The choice of prior can also be influential when conducting SA prior to or without UQ. This is because a prior that is much wider than the region where the model response surface is sensitive to a given parameter might cause the sensitivity calculation to underestimate the response in that potentially critical region. Similarly, a prior which is too narrow may miss regions where the model responds to the parameter altogether. + +Ideally, prior distributions are constructed independently of any analysis of the new data considered. This is because using data to inform the prior as well as to compute the likelihood reuses information in a potentially inappropriate way, which can lead to overconfident inferences. Following :cite:t:`jaynes_probability_1996`, :cite:t:`gelman_prior_2017` refer to the ideal prior as one which encodes all available information about the model. For practical reasons (difficulty of construction or computational inconvenience), most priors fail to achieve this ideal. These compromises mean that priors should be transparently articulated and justified, so that the impact of the choice of prior can be fully understood. When there is ambiguity about an appropriate prior, such as how fat the tails should be, an analyst should examine how sensitive the UQ results are to the choice of prior. + +Priors can also be classified in terms of the information encoded by them, as demonstrated in :numref:`Figure_A1_5`. 
Non-informative priors (illustrated in :numref:`Figure_A1_5` (a)) allegedly correspond to (and are frequently justified by) a position of ignorance. A classic example is the use of a uniform distribution. A uniform prior can, however, be problematic, as it can lead to improper inferences by giving extremely large values the same prior probability as values which may seem more likely :cite:p:`gelman_prior_2017, robert_bayesian_2007`, and therefore does not really reflect a state of complete ignorance. In the extreme case of a uniform prior over the entire real line, every particular region has effectively a prior weight of zero, even though not all regions are a priori unlikely :cite:p:`robert_bayesian_2007`. Moreover, a uniform prior which excludes possible parameter values is not actually noninformative, as it assigns zero probability to those values while jumping to a nonzero probability as soon as the boundary is crossed. While a uniform prior can be problematic for the task of uncertainty quantification, it may be useful for an initial sensitivity analysis to identify the boundary of any regions where the model is sensitive to the parameter. + +.. _Figure_A1_5: +.. figure:: _static/figureA1_5_priors_posteriors.png + :alt: Figure A1.5 + :width: 700px + :figclass: margin-caption + :align: center + + Impact of priors on posterior inferences. These plots show the results of inference for a linear regression model with 15 data points. The true value of the parameter is equal to -3. All priors have mean 0. In panel (a), a non-informative prior allows the tails of the posterior to extend freely, which may result in unreasonably large parameter values. In panel (b), a weakly informative prior constrains the tails more, but allows them to extend without too much restriction. In panel (c), an informative prior strongly constrains the tails of the posterior and biases the inference closer towards the prior mean (the posterior mean is -0.89 in this case, and closer to -3 in the other two cases). + +Informative priors strongly bound the range of probable values (illustrated in :numref:`Figure_A1_5` (c)). One example is a Gaussian distribution with a relatively small standard deviation, so that large values are assigned a close to null prior probability. Another example is the jump from zero to non-zero probability occurring at the truncation point of a truncated Gaussian, which could be justified based on information that the parameter cannot take on values beyond this point. Without this type of justification, however, priors may be too informative, failing to allow the information contained in the available data to update them. + +Finally, weakly informative priors (illustrated in :numref:`Figure_A1_5` (b)) fall in between :cite:p:`gelman_prior_2017`. They regularize better than non-informative priors, but allow for more inference flexibility than fully informative priors. An example might be a Gaussian distribution with a moderate standard deviation, which still assigns negligible probability for values far away from the mean, but is less constrained than a narrow Gaussian for a reasonably large area. A key note is that it is not necessarily better to be more informative if this cannot be justified by the available information. 
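The distinction can be made concrete with a few lines of code: the sketch below defines a non-informative, a weakly informative, and an informative prior for a generic regression coefficient and reports how much prior mass each places on implausibly large values. The specific distributions and scales are arbitrary choices for illustration, not recommendations for any particular model.

.. code:: python

    from scipy import stats

    # "Non-informative": a very wide uniform prior over an arbitrary range.
    noninformative = stats.uniform(loc=-30, scale=60)

    # Weakly informative: a Gaussian with moderate spread that regularizes
    # the tails but leaves the data room to dominate.
    weakly_informative = stats.norm(loc=0.0, scale=5.0)

    # Informative: a narrow Gaussian that strongly constrains the parameter.
    informative = stats.norm(loc=0.0, scale=0.5)

    for name, prior in [("non-informative", noninformative),
                        ("weakly informative", weakly_informative),
                        ("informative", informative)]:
        # Prior probability assigned to |theta| > 10 under each choice.
        mass_in_tails = 2 * prior.sf(10.0)
        print(f"{name:>20s}: P(|theta| > 10) = {mass_in_tails:.3f}")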
\ No newline at end of file diff --git a/dev/docs/html/_sources/A1.7_Critical_final_step.rst b/dev/docs/html/_sources/A1.7_Critical_final_step.rst new file mode 100644 index 0000000..721b8f2 --- /dev/null +++ b/dev/docs/html/_sources/A1.7_Critical_final_step.rst @@ -0,0 +1,10 @@ +The Critical Final Step: Predictive Checks +########################################## + +Every UQ workflow requires a number of choices, potentially including the selection of prior distributions, the specification of the likelihood, and the choice of any numerical models used. Checking the appropriateness of these choices is an essential step for sound inferences, as misspecification can produce biased results :cite:p:`brynjarsdottir_learning_2014`. Model checking in this fashion is part of an iterative UQ process, as the results can reveal adjustments to the statistical model or the need to select a different numerical model :cite:p:`gelman_posterior_1996, gelman_bayesian_2020, gelman_philosophy_2013`. + +A classic example is the need to check the structure of residuals for correlations. Many standard statistical models, such as linear regression, assume that the residuals are independent and identically distributed from the error distribution. The presence of correlations, including temporal autocorrelations and spatial correlations, indicates a structural mismatch between the likelihood and the data. In these cases, the likelihood should be adjusted to account for these correlations. + +Checking residuals in this fashion is one example of a predictive check (or a posterior predictive check in the Bayesian setting). One way to view UQ is as a means to recover data-generating processes (associated with each parameter vector) consistent with the observations. Predictive checks compare the inferred data-generating process to the observations to determine whether the model is capable of appropriately capturing uncertainty. After conducting the UQ analysis, alternative realizations of the dataset are simulated from the sampled parameters. These alternative datasets, or their summary statistics, can be tested against the observations to determine the adequacy of the fit. Predictive checks are therefore a way of probing various model components to identify shortcomings that might result in biased inferences or poor projections, depending on the goal of the analysis. + +One example of a graphical predictive check for time series models is hindcasting, where predictive intervals are constructed from the alternative datasets and plotted along with the data. Hindcasts demonstrate how well the model is capable of capturing the broader dynamics of the data, as well as whether the parameter distributions produce appropriate levels of output uncertainty. A related quantitative check is the surprise index, which calculates the percentage of data points located within a fixed predictive interval. For example, the 90% predictive interval should contain approximately 90% of the data. If substantially more than 90% of the data fall inside the interval, the quantified uncertainty is too wide (underconfidence); if substantially fewer do, it is too narrow (overconfidence). This could be the result of priors that are not appropriately informative, or a likelihood that does not account for correlations between data points appropriately. It could also be the result of a numerical model that is not sufficiently sensitive to the parameters that are treated as uncertain. 
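As a small numerical illustration of the surprise index, the sketch below simulates replicate datasets from sampled parameters of a toy Gaussian model and reports how often the observations fall inside the resulting 90% predictive interval. The observations, the posterior samples, and the model are all synthetic placeholders rather than output from an actual UQ analysis.

.. code:: python

    import numpy as np

    rng = np.random.default_rng(11)

    # Synthetic observations and placeholder "posterior" samples of the
    # mean and residual standard deviation of a toy Gaussian model.
    y_obs = rng.normal(loc=2.0, scale=1.0, size=100)
    posterior_mu = rng.normal(loc=2.0, scale=0.1, size=2000)
    posterior_sigma = np.abs(rng.normal(loc=1.0, scale=0.05, size=2000))

    # Simulate replicate datasets from the sampled data-generating processes.
    replicates = rng.normal(loc=posterior_mu[:, None],
                            scale=posterior_sigma[:, None],
                            size=(2000, y_obs.size))

    # 90% predictive interval for each observation, pooled over replicates.
    lower, upper = np.percentile(replicates, [5, 95], axis=0)

    # Surprise index: the fraction of observations inside the interval;
    # values far from 0.9 suggest over- or underconfident uncertainty.
    surprise_index = np.mean((y_obs >= lower) & (y_obs <= upper))
    print(f"Fraction of data inside the 90% interval: {surprise_index:.2f}")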
diff --git a/dev/docs/html/_sources/A1.8_Key_take_home.rst b/dev/docs/html/_sources/A1.8_Key_take_home.rst new file mode 100644 index 0000000..87fed11 --- /dev/null +++ b/dev/docs/html/_sources/A1.8_Key_take_home.rst @@ -0,0 +1,16 @@ +Key Take-Home Points +#################### + +When appropriate, UQ is an important component of the exploratory modeling workflow. While a number of parameter sets could be consistent with observations, they may result in divergent model outputs when exposed to different future conditions. This can result in identifying risks which are not visible when selecting a “best fit” parameterization. Quantifying uncertainties also allows us to quantify the support for hypotheses, which is an essential part of the scientific process. + +Due to the scale and complexity of the experiments taking place in IM3, UQ has not been extensively used. The tradeoff between the available number of function evaluations and the number of uncertain parameters illustrated in :numref:`Figure_A1_1` is particularly challenging due to the increasing complexity of state-of-the-art models and the movement towards coupled, multisector models. This tradeoff can be addressed somewhat through the use of emulators and parallelizable methods. In particular, when attempting to navigate this tradeoff by limiting the number of uncertain parameters, it is important to carefully iterate with sensitivity analyses to ensure that critical parameters are identified. + +Specifying prior distributions and likelihoods is an ongoing challenge. Prior distributions, in particular, should be treated as deeply uncertain when appropriate. One key advantage of the methods described in this chapter is that they have the potential for increased transparency. When it is not possible to conduct a sensitivity analysis on a number of critical priors due to limited computational budgets, fully specifying and providing a justification for the utilized distributions allows other researchers to identify key assumptions and build on existing work. The same is true for the specification of likelihoods---while likelihood-free methods avoid the need to specify a likelihood function, they require other assumptions or choices, which should be described and justified as transparently as possible. + +We conclude this appendix with some key recommendations: +1. UQ analysis does not require full confidence in priors and likelihoods. Rather, UQ should be treated as part of an exploratory modeling workflow, where hypotheses related to model structures, prior distributions, and likelihoods can be tested. +2. For complex multisectoral models, UQ will typically require the use of a reduced set of parameters, either through emulation or by fixing the others to their best-fit values. These parameters should be selected through a careful sensitivity analysis. +3. Avoid the use of supposedly “non-informative” priors, such as uniform priors, whenever possible. In the absence of strong information about parameter values, the use of weakly informative priors, such as diffuse normals, is preferable. +4. Be cognizant of the limitations of conclusions that can be drawn by using each method. The bootstrap, for example, may result in overconfidence if the dataset is limited and is not truly representative of the underlying stochastic process. +5. When using MCMC, Markov chains can not be shown to have converged to the target distribution, but rather evidence can be collected to demonstrate that it is likely that they have. +6. 
Conduct predictive checks based on the assumptions underlying the choices made in the analysis, and iteratively update those choices if the assumptions prove to be ill-suited for the problem at hand. diff --git a/dev/docs/html/_sources/A1_Uncertainty_Quantification.rst b/dev/docs/html/_sources/A1_Uncertainty_Quantification.rst new file mode 100644 index 0000000..68718d4 --- /dev/null +++ b/dev/docs/html/_sources/A1_Uncertainty_Quantification.rst @@ -0,0 +1,18 @@ +.. raw:: latex + + \appendix + +.. _A1_uncertainty_quantification: + +************************** +Uncertainty Quantification +************************** + +.. include:: A1.1_UQ_Introduction.rst +.. include:: A1.2_Parametric_Bootstrap.rst +.. include:: A1.3_Precalibration.rst +.. include:: A1.4_Markov_Chain_Monte_Carlo.rst +.. include:: A1.5_Other_methods.rst +.. include:: A1.6_Critical_first_step.rst +.. include:: A1.7_Critical_final_step.rst +.. include:: A1.8_Key_take_home.rst diff --git a/dev/docs/html/_sources/A2.1_fishgame.rst b/dev/docs/html/_sources/A2.1_fishgame.rst new file mode 100644 index 0000000..783399b --- /dev/null +++ b/dev/docs/html/_sources/A2.1_fishgame.rst @@ -0,0 +1,416 @@ +Fishery Dynamics Tutorial +************************* + +.. note:: + + | Run the tutorial interactively: `Fishery Dynamics Notebook `_. + | Please be aware that notebooks can take a couple minutes to launch. + | To run the notebooks yourself, download the files `here `_ and use these `requirements `_. + + +Tutorial: Sensitivity Analysis (SA) to discover factors shaping consequential dynamics +====================================================================================== + +This notebook demonstrates the application of sensitivity analysis to +discover factors that shape the behavior modes of a socio-ecological +system with dynamic human action. + +The system of differential equations below represents a system of prey +(defined in the equation below as x) and predator (defined as y) fish, +with a human actor harvesting the prey fish. You can read more about +this system in `Hadjimichael et +al. (2020) `__. + + +.. image:: _static/eqn2.png + +The table below defines the parameters in the system and also denotes +the baseline values and ranges associated with each uncertain parameter. + +.. image:: _static/table1.png + +The system is simple but very rich in the dynamic behaviors it exhibits. +This complexity is accompanied by the presence of several equilibria +that come in and out of existence with different parameter values. The +equilibria also change in their stability according to different +parameter values, giving rise to different behavior modes as shown by +the diverse predator and prey abundance trajectories in the figure below. + +.. image:: _static/Figure_1.png + +In the unharvested system (without the human actor) the stability of +several of these equilibria can be derived analytically. The task +becomes significantly more difficult when the adaptive human actor is +introduced, deciding to harvest the system at different rates according +to their objectives and preferences. + +Sensitivity analysis methods can help us identify the factors that most +control these dynamics by exploring the space of parameter values and +seeing how system outputs change as a result. 
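To show what exploring the parameter space can look like in code, the sketch below integrates a deliberately generic Lotka-Volterra system with a constant prey-harvest rate and records a simple output while one parameter is varied. This is a stand-in example only; it is not the harvested predator-prey model from Hadjimichael et al. (2020) shown in the equation image above, and all parameter values are arbitrary.

.. code:: python

    import numpy as np
    from scipy.integrate import solve_ivp

    def generic_prey_predator(t, state, alpha, beta, gamma, delta, harvest):
        # Generic prey (x) and predator (y) dynamics with constant prey harvesting.
        x, y = state
        dxdt = alpha * x - beta * x * y - harvest * x
        dydt = delta * x * y - gamma * y
        return [dxdt, dydt]

    # Vary one illustrative parameter (the harvest rate) and record the
    # minimum prey abundance reached over the simulation horizon.
    for harvest in np.linspace(0.0, 0.8, 5):
        sol = solve_ivp(generic_prey_predator, t_span=(0, 50), y0=[10.0, 5.0],
                        args=(1.0, 0.1, 1.5, 0.075, harvest), max_step=0.1)
        print(f"harvest rate {harvest:.1f}: minimum prey abundance "
              f"{sol.y[0].min():.3f}")

Sweeping a single factor like this is the simplest form of such an exploration; the tutorial below instead uses a global (Sobol) design over all nine uncertain parameters.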
+ +Through previously conducted optimization, there already exists a set of +potential harvesting strategies that were identified in pursuit of five +objectives: + +- Maximize Harvesting Discounted Profits (Net Present Value) +- Minimize Prey Population Deficit +- Minimize Longest Duration of Consecutive Low Harvest +- Maximize Worst Harvest Instance +- Minimize Harvest Variance + +The identified harvesting strategies also meet the necessary constraint +of not causing inadvertent predator collapse. + +We will be examining the effects of parametric uncertainty on these +identified strategies, particularly focusing on two strategies: one +selected to maximize harvesting profits and one identified through +previous analysis to perform ‘well enough’ for all objectives across a +wide range of states of the world (referred to as the ‘robust’ +harvesting policy). + +Let’s get started! +------------------ + +In this tutorial, we will be loading in data that has been produced in +Hadjimichael et al. (2020). Before we start our analysis, we’ll load the +relevant Python libraries. **NOTE**: To step through the notebook, +execute each gray (code) box by typing “Shift+Enter”. + +.. code:: ipython3 + + #Import necessary libraries + + import msdbook + import numpy as np + import matplotlib.pyplot as plt + from SALib.sample import saltelli + from SALib.analyze import sobol + from matplotlib import patheffects as pe + + # load example data + msdbook.install_package_data() + + %matplotlib inline + %config InlineBackend.print_figure_kwargs = {'bbox_inches':None} + +.. parsed-literal:: + + Downloading example data for msdbook version 0.1.5... + Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/uncertain_params_bounds.txt + Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/sa_metric_s1.npy + Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/sa_vary_delta.npy + Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/sa_by_mth_s1.npy + Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/solutions.resultfile + Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/3704614_heatmap.npy + Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/LHsamples_original_1000.txt + Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/3704614_pseudo_r_scores.csv + Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/param_values.csv + Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/sa_by_yr_s1.npy + Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/sa_by_mth_delta.npy + Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/7000550_pseudo_r_scores.csv + Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/collapse_days.csv + Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/hymod_params_256samples.npy + Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/sa_vary_s1.npy + Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/7000550_heatmap.npy + Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/7200799_heatmap.npy + Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/sa_by_yr_delta.npy + Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/7200799_pseudo_r_scores.csv + Unzipped: 
/srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/LeafCatch.csv + Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/hymod_simulations_256samples.csv + Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/Robustness.txt + +Step 1: Load identified solutions and explore performance +--------------------------------------------------------- + +Here we load in the solution set obtained in Hadjimichael et al. (2020). +The solution set contains the decision variables and objectives +associated with a variety of harvesting policies. For this tutorial, we +focus on comparing two policies: harvesting profits and one that +performs robustly across all objectives. Below, we are reading in the +decision variables and objectives from an external file that can be +found within the msdbook package data. + +.. code:: ipython3 + + robustness = msdbook.load_robustness_data() + results = msdbook.load_profit_maximization_data() + + robust_solution = np.argmax(robustness[:,-1]) #pick robust solution + profit_solution = np.argmin(results[:,6]) #pick profitable solution + objective_performance = -results[:,6:] #Retain objective values + + # Get decision variables for each of the policies + highprofitpolicy = results[profit_solution,0:6] + mostrobustpolicy = results[robust_solution,0:6] + + +Next we plot the identified solutions with regards to their objective +performance in a parallel axis plot + +.. tip:: View the source code used to create this plot here: `plot_objective_performance `_ + +.. code:: ipython3 + + ax, ax1 = msdbook.plot_objective_performance(objective_performance, profit_solution, robust_solution) + + +.. image:: _static/fishery_output_6_0.png + + +The solution set from the optimization in Hadjimichael et al. (2020) are +presented in a parallel axis plot where each of the five objectives (and +one constraint) are represented as an axis. Each solution on the Pareto +front is represented as a line where the color of the line indicates the +value of the NPV objective. The preference for objective values is in +the upward direction. Therefore, the ideal solution would be a line +straight across the top of the plot that satisfies every objective. +However, no such line exists because there are tradeoffs when sets of +objectives are prioritized over the others. When lines cross in between +axes, this indicates a tradeoff between objectives (as seen in the first +two axes).The solution that is most robust in the NPV objective has the +highest value on the first axis and is outlined in dark gold. The +solution that is most robust across all objectives is outlined in a +brighter yellow. A parallel axis is an effective visual to characterize +high-dimensional tradeoffs in the system and visualize differences in +performance across policies. + +Step 2: Use SALib to generate a sample for a Sobol sensitivity analysis +----------------------------------------------------------------------- + +In Step 1, we showed how the optimized harvesting policies performed in +the objective space, which utilized the baseline parameters outlined in +the table above. Now, we are interested in understanding how sensitive +our two policies are to alternative states of the world that may be +characterized by different parameter values. To do so, we first need to +define the problem dictionary that allows us to generate these +alternative states of the world. + +.. 
code:: ipython3 + + # Set up SALib problem + problem = { + 'num_vars': 9, + 'names': ['a', 'b', 'c', 'd', 'h', 'K', 'm', 'sigmaX', 'sigmaY'], + 'bounds': [[0.002, 2], [0.005, 1], [0.2, 1], [0.05, 0.2], [0.001, 1], + [100, 5000], [0.1, 1.5], [0.001, 0.01], [0.001, 0.01]] + } + + +Then we use the following command to generate a Saltelli sample from +these defined ranges: + +.. code:: python + + param_values = saltelli.sample(problem, 1024, calc_second_order=False) + +Generally, it is a good idea to save the result of the sample since it +is often reused and regenerating it produces a different sample set. For +this reason, we will load one from file that was previously generated. + +.. code:: ipython3 + + # load previously generated Saltelli sample from our msdbook package data + param_values = msdbook.load_saltelli_param_values() + + +Step 3: Evaluate the system over all generated states of the world +------------------------------------------------------------------ + +Now we re-evaluate how well the policies do in the new states of the +world. In order to characterize failure of a policy, we identify the +states where the predator population collapses, as an inadvertent +consequence of applying the harvesting strategy under a state of the +world different from the one originally assumed. Due to how long this +step takes to execute within the tutorial, we will read in the solutions +from an external file. However, the block of code below shows how +evaluation can be implemented. + +.. code:: python + + # create array to store collapse values under both policies + collapse_days = np.zeros([len(param_values), 2]) + + # evaluate performance under every state + for i in range(len(param_values)): + + additional_inputs = np.append(['Previous_Prey'], + [param_values[i,0], + param_values[i,1], + param_values[i,2], + param_values[i,3], + param_values[i,4], + param_values[i,5], + param_values[i,6], + param_values[i,7], + param_values[i,8]]) + + collapse_days[i,0]=fish_game(highprofitpolicy, additional_inputs)[1][0] + collapse_days[i,1]=fish_game(mostrobustpolicy, additional_inputs)[1][0] + +.. code:: ipython3 + + # load the simulation data from our msdbook package data + collapse_days = msdbook.load_collapse_data() + + +Step 4: Calculate sensitivity indices +------------------------------------- + +Now we use a Sobol sensitivity analysis to calculate first-order, +second-order, and total-order sensitivity indices for each parameter and +for each of the two policies. These indicies help determine which +factors explain the most variability in the number of days of predator +population collapse. + +.. code:: ipython3 + + #Perform the Sobol SA for the profit-maximizing solution + Si_profit = sobol.analyze(problem, collapse_days[:, 0], + calc_second_order=False, + conf_level=0.95, + print_to_console=True) + + +.. code:: ipython3 + + #Perform the Sobol SA for the robust solution + Si_robustness = sobol.analyze(problem, + collapse_days[:, 1], + calc_second_order=False, + conf_level=0.95, + print_to_console=True) + + +.. 
parsed-literal:: + + ST ST_conf + a 0.226402 0.036146 + b 0.066819 0.013347 + c 0.004395 0.004023 + d 0.024509 0.006993 + h 0.009765 0.005488 + K 0.020625 0.009494 + m 0.897971 0.066470 + sigmaX 0.000136 0.000149 + sigmaY 0.000739 0.001040 + S1 S1_conf + a 0.087936 0.044236 + b 0.000554 0.021474 + c -0.002970 0.004590 + d 0.001206 0.015881 + h 0.004554 0.007998 + K 0.003843 0.012661 + m 0.751301 0.071862 + sigmaX -0.000325 0.001245 + sigmaY -0.001887 0.002768 + + +Looking at the total-order indices, (ST) factors :math:`m`, :math:`a`, +:math:`b`, :math:`d` and :math:`K` explain a non-negligible amount of +variance therefore have an effect on the stability of this system. +Looking at the first-order indices (S1), we also see that besides +factors :math:`m` and :math:`a`, all other factors are important in this +system through their interactions, which make up the difference between +their S1 and ST indices. This shows the danger of limiting sensitivity +analyses to first order effects, as factor importance might be +significantly misjudged. + +These findings are supported by the analytical condition of equilibrium +stability in this system: + +.. image:: _static/eqn4.png + +In an unharvested system, this condition is both necessary and +sufficient for the equilibrium of the two species coexisting to be +stable. + +When adaptive human action is introduced however, this condition is +still necessary, but no longer sufficient, as harvesting reduces the +numbers of prey fish and as a result reduces the resources for the +predator fish. Since this harvesting value is not constant, but can +dynamically adapt according to the harvester’s objectives, it cannot be +introduced into this simple equation. + +Step 5: Explore relationship between uncertain factors and performance +---------------------------------------------------------------------- + +In the following steps, we will use the results of our sensitivity +analysis to investigate the relationships between parametric +uncertainty, equilibrium stability and the performance of the two +policies. + +We can use the top three factors identified (:math:`m`, :math:`a`, and +:math:`b`) to visualize the performance of our policies in this +three-dimensional parametric space. + +We first define the stability condition, as a function of :math:`b` and +:math:`m`, and calculate the corresponding values of :math:`a`. + +.. code:: ipython3 + + def inequality(b, m, h, K): + return ((b**m)/(h*K)**(1-m)) + + # boundary interval that separates successful and failed states of the world + b = np.linspace(start=0.005, stop=1, num=1000) + m = np.linspace(start=0.1, stop=1.5, num=1000) + h = np.linspace(start=0.001, stop=1, num=1000) + K = np.linspace(start=100, stop=2000, num=1000) + b, m = np.meshgrid(b, m) + a = inequality(b, m, h, K) + a = a.clip(0,2) + + +.. tip:: View the source code used to create this plot here: `plot_factor_performance `_ + +.. code:: ipython3 + + # generate plot + ax1, ax2 = msdbook.plot_factor_performance(param_values, collapse_days, b, m, a) + + +.. image:: _static/fishery_output_22_0.png + + +These figures show the combinations of factors that lead to success or +failure in different states of the world for the profit-maximizing and +robust policies. Each point is a state of the world, characterized by +specific values of the parameters, and ideally, we would like the color +of the point to be blue, to represent that there are a low number of +days with a predator collapse in that world. 
The gray curve denotes the +highly non-linear nature of the boundary, defined by the stability +condition, that separates successful and failed states of the world. The +figures demonstrate the following key points: + +First, as asserted above, the policies interact with the system in +different and complex ways. In the presence of human action, the +stability condition is not sufficient in determining whether the policy +will succeed, even though it clearly shapes the system in a fundamental +manner. + +Second, the robust policy manages to avoid collapse in many more of the sampled states of the world, indicated by the number of blue points. The robust policy avoids collapse in 31% of worlds versus 14% in the profit-maximizing policy. This presents a clear tradeoff between profit-maximizing performance and robustness against uncertainty. + +Tips to Apply Sobol SA and Scenario Discovery to your Problem +------------------------------------------------------------- + +In this tutorial, we demonstrated a Sobol SA to identify the most +important factors driving the behavior of a system (i.e., the number of +collapse days). In order to apply this methodology to your problem, +you will need to have a set of optimized policies for your system that +you are interested in analyzing. The general workflow is as follows: + +1. Choose sampling bounds for your parameters and set up the problem + dictionary as in Step 2 above. +2. Generate samples, or alternative states of the world, using the + ``saltelli.sample`` function. +3. Evaluate your policies on the alternative states of the world. For + your application, you will also need to develop a rule for + determining success or failure of your policy in a new SOW. In this + tutorial, success was denoted by a small number of collapse days. + Ultimately, the rule will be specific to your application and can + include various satisficing criteria. +4. Calculate the Sobol indices and discover the most important + parameters driving success and failure. +5. Finally, use a similar plotting procedure as in Step 5 of the tutorial above + to identify the combinations of parameter values that lead to success and failure + in the system. diff --git a/dev/docs/html/_sources/A2.2_saltelli.rst b/dev/docs/html/_sources/A2.2_saltelli.rst new file mode 100644 index 0000000..409681a --- /dev/null +++ b/dev/docs/html/_sources/A2.2_saltelli.rst @@ -0,0 +1,302 @@ +Sobol SA Tutorial +************************* + +.. note:: + + | Run the tutorial interactively: `Sobol SA Tutorial `_. + | Please be aware that notebooks can take a couple minutes to launch. + | To run the notebooks yourself, download the files `here `_ and use these `requirements `_. + +Tutorial: Sensitivity Analysis (SA) using the Saltelli sampling scheme with Sobol SA +==================================================================================== + +In this tutorial, we will set up a workflow to investigate how sensitive +the output of a function is to its inputs. Why might you want to do +this? Imagine that this function represents a complex system, such as +the rainfall-runoff process of a watershed model, and that you, the +researcher, want to investigate how your choice of input parameter +values is affecting the model’s characterization of runoff in the +watershed. Your parameter values are likely uncertain and can take on +any value in a pre-defined range. Using a Sobol SA will allow you to +sample different values of your parameters and calculate how sensitive +your output of interest is to certain parameters. 
Below, we demonstrate +Sobol SA for a simple function to illustrate the method, but the +workflow can be applied to your own problem of interest! + +In order to conduct this analysis, we will use the popular Python +Sensitivity Analysis Library +(`SALib `__) to: + +#. Generate a problem set as a dictionary for our Ishigami function that has three inputs +#. Generate 2048 samples for our problem set using the Saltelli [1]_ [2]_ sampling scheme +#. Execute the Ishigami function for each of our samples and gather the outputs +#. Compute the sensitivity analysis to generate first-order and total-order sensitivity indices using the Sobol [3]_ method +#. Interpret the meaning of our results + +Let’s get started! +------------------ + +**NOTE**: Content from this tutorial is taken directly from the SALib +`“Basics” `__ +walkthrough. To step through the notebook, execute each gray (code) box +by typing “Shift+Enter”. + +.. code:: ipython3 + + #Import relevant libraries + import numpy as np + import matplotlib.pyplot as plt + from mpl_toolkits import mplot3d + + from SALib.sample import saltelli + from SALib.analyze import sobol + from SALib.test_functions import Ishigami + +Step 1: Generate the problem dictionary +--------------------------------------- + +The Ishigami function is of the form: + +.. math:: f(x_1,x_2,x_3) = sin(x_1)+asin^2(x_2)+bx_3^4sin(x_1) + +The function has three inputs, 𝑥1, 𝑥2, 𝑥3 where 𝑥𝑖 ∈ [−𝜋, 𝜋]. The +constants :math:`a` and :math:`b` are defined as 7.0 and 0.1 +respectively. + +.. code:: ipython3 + + #Create a problem dictionary. Here we supply the number of variables, the names of each variable, and the bounds of the variables. + problem = { + 'num_vars': 3, + 'names': ['x1', 'x2', 'x3'], + 'bounds': [[-3.14159265359, 3.14159265359], + [-3.14159265359, 3.14159265359], + [-3.14159265359, 3.14159265359]] + } + +Step 2: Generate samples using the Saltelli sampling scheme +----------------------------------------------------------- + +Sobol SA requires the use of the Saltelli sampling scheme. The output of +the ``saltelli.sample`` function is a NumPy array that is of shape 2048 +by 3. The sampler generates 𝑁∗(2𝐷+2) samples, where in this example, N +is 256 (the argument we supplied) and D is 3 (the number of model +inputs), yielding 2048 samples. The keyword argument +``calc_second_order=False`` will exclude second-order indices, resulting +in a smaller sample matrix with 𝑁∗(𝐷+2) rows instead. Below, we plot the +resulting Saltelli sample. + +.. code:: ipython3 + + #Generate parmeter values using the saltelli.sample function + param_values = saltelli.sample(problem, 256) + + print(f"`param_values` shape: {param_values.shape}") + + + +.. parsed-literal:: + + `param_values` shape: (2048, 3) + + +.. code:: ipython3 + + #Plot the 2048 samples of the parameters + + fig = plt.figure(figsize = (7, 5)) + ax = plt.axes(projection ="3d") + ax.scatter3D(param_values[:,0], param_values[:,1], param_values[:,2]) + ax.set_xlabel('X1 Parameter') + ax.set_ylabel('X2 Parameter') + ax.set_zlabel('X3 Parameter') + plt.title("Saltelli Sample of Parameter Values") + + plt.show() + + + +.. image:: _static/output_7_0.png + + +Step 3: Execute the Ishigami function over our sample set +--------------------------------------------------------- + +SALib provides a nice wrapper to the Ishigami function that allows the +user to directly pass the ``param_values`` array we just generated into +the function directly. + +.. 
code:: ipython3 + + Y = Ishigami.evaluate(param_values) + +Step 4: Compute first-, second-, and total-order sensitivity indices using the Sobol method +------------------------------------------------------------------------------------------- + +The ``sobol.analyze`` function will use our problem dictionary and the +result of the Ishigami runs (``Y``) to compute first-, second-, and +total-order indicies. + +.. code:: ipython3 + + Si = sobol.analyze(problem, Y) + +``Si`` is a Python dict with the keys “S1”, “S2”, “ST”, “S1_conf”, +“S2_conf”, and “ST_conf”. The ``_conf`` keys store the corresponding +confidence intervals, typically with a confidence level of 95%. Use the +keyword argument ``print_to_console=True`` to print all indices. Or, we +can print the individual values from ``Si`` as shown in the next step. + +Step 5: Interpret our results +----------------------------- + +We execute the following code and take a look at our first-order indices +(``S1``) for each of our three inputs. These indicies can be interpreted +as the fraction of variance in the output that is explained by each +input individually. + +.. code:: ipython3 + + first_order = Si['S1'] + + print('First-order:') + print(f"x1: {first_order[0]}, x2: {first_order[1]}, x3: {first_order[2]}") + + +.. parsed-literal:: + + First-order: + x1: 0.3184242969763115, x2: 0.4303808201623416, x3: 0.022687722804980225 + + +If we were to rank the importance of the inputs in how much they +individually explain the variance in the output, we would rank them from +greatest to least importance as follows: 𝑥2, 𝑥1 and then 𝑥3. Since 𝑥3 +only explains 2% of the output variance, it does not explain output +variability meaningfully. Thus, this indicates that there is +contribution to the output variance by 𝑥2 and 𝑥1 independently, whereas +𝑥3 does not contribute to the output variance. Determining what inputs +are most important or what index value is meaningful is a common +question, but one for which there is no general rule or threshold. This +question is problem and context-dependent, but procedures have been +identified to rank order influential inputs and which can be used to +identify the least influential factors. These factors can be fixed to +simplify the model [4]_ [5]_ [6]_. + +Next, we evaluate the total-order indices, which measure the +contribution to the output variance caused by varying the model input, +including both its first-order effects (the input varying alone) and all +higher-order interactions across the input parameters. + +.. code:: ipython3 + + total_order = Si['ST'] + + print('Total-order:') + print(f"x1: {total_order[0]}, x2: {total_order[1]}, x3: {total_order[2]}") + + +.. parsed-literal:: + + Total-order: + x1: 0.5184119098161343, x2: 0.41021260250026054, x3: 0.2299058431439953 + + +The magnitude of the total order indices are substantially larger than +the first-order indices, which reveals that higher-order interactions +are occurring, i.e. that the interactions across inputs are also +explaining some of the total variance in the output. Note that 𝑥3 has +non-negligible total-order indices, which indicates that it is not a +consequential parameter when considered in isolation, but becomes +consequential and explains 25% of variance in the output through its +interactions with 𝑥1 and 𝑥2. + +Finally, we can investigate these higher order interactions by viewing +the second-order indices. 
The second-order indicies measure the +contribution to the output variance caused by the interaction between +any two model inputs. Some computing error can appear in these +sensitivity indices, such as negative values. Typically, these computing +errors shrink as the number of samples increases. + +.. code:: ipython3 + + second_order = Si['S2'] + + print("Second-order:") + print(f"x1-x2: {second_order[0,1]}") + print(f"x1-x3: {second_order[0,2]}") + print(f"x2-x3: {second_order[1,2]}") + + + +.. parsed-literal:: + + Second-order: + x1-x2: -0.043237389723234154 + x1-x3: 0.17506452088709862 + x2-x3: -0.03430682392607577 + + +We can see that there are strong interactions between 𝑥1 and 𝑥3. Note +that in the Ishigami function, these two variables are multiplied in the +last term of the function, which leads to interactive effects. If we +were considering first order indices alone, we would erroneously assume +that 𝑥3 explains no variance in the output, but the second-order and +total order indices reveal that this is not the case. It’s easy to +understand where we might see interactive effects in the case of the +simple Ishigami function. However, it’s important to remember that in +more complex systems, there may be many higher-order interactions that +are not apparent, but could be extremely consequential in explaining the +variance of the output. + +Tips to Apply Sobol SA to Your Own Problem +------------------------------------------ + +In this tutorial, we demonstrated how to apply an SA analysis to a +simple mathematical test function. In order to apply a Sobol SA to your +own problem, you will follow the same general workflow that we defined +above. You will need to: + +1. Choose sampling bounds for your parameters and set up the problem + dictionary as in Step 1 above. +2. Generate samples using the ``saltelli.sample`` function. This step is + problem-dependent and note that the Sobol method can be + computationally intensive depending on the model being analyzed. For + example, for a simple rainfall-runoff model such as HYMOD, it has + been recommended to run a sample size of at least N = 10,000 (which + translates to 60,000 model runs). More complex models will be slower + to run and will also require more samples to calculate accurate + estimates of Sobol indices. Once you complete this process, pay + attention to the confidence bounds on your sensitivity indices to see + whether you need to run more samples. +3. Run the parameter sets through your model. In the example above, the + Ishigami function could be evaluated through SALib since it is a + built in function. For your application, you will need to run these + parameter sets through the problem externally and save the output. + The output file should contain one row of output values for each + model run. +4. Calculate the Sobol indices. Now, the Y will be a numpy array with + your external model output and you will need to include the parameter + samples as an additional argument. +5. Finally, we interpet the results. If the confidence intervals of your + dominant indices are larger than roughly 10% of the value itself, you + may want to consider increasing your sample size as computation + permits. You should additionally read the references noted in Step 5 + above to understand more about identifying important factors. + + +.. only:: html + + **References** + +.. [1] Saltelli, A. (2002). "Making best use of model evaluations to compute sensitivity indices." 
Computer Physics Communications, 145(2):280-297, doi:`10.1016/S0010-4655(02)00280-1 `_. + +.. [2] Saltelli, A., P. Annoni, I. Azzini, F. Campolongo, M. Ratto, and S. Tarantola (2010). "Variance based sensitivity analysis of model output. Design and estimator for the total sensitivity index." Computer Physics Communications, 181(2):259-270, doi:`10.1016/j.cpc.2009.09.018 `_. + +.. [3] Sobol, I. M. (2001). "Global sensitivity indices for nonlinear mathematical models and their Monte Carlo estimates." Mathematics and Computers in Simulation, 55(1-3):271-280, doi:`10.1016/S0378-4754(00)00270-6 `_. + +.. [4] T.\ H.\ Andres, "Sampling methods and sensitivity analysis for large parameter sets," Journal of Statistical Computation and Simulation, vol. 57, no. 1–4, pp. 77–110, Apr. 1997, doi:`10.1080/00949659708811804 `_. + +.. [5] Y.\ Tang, P.\ Reed, T.\ Wagener, and K.\ van Werkhoven, "Comparing sensitivity analysis methods to advance lumped watershed model identification and evaluation," Hydrology and Earth System Sciences, vol. 11, no. 2, pp. 793–817, Feb. 2007, doi:`10.5194/hess-11-793-2007 `_. + +.. [6] J.\ Nossent, P.\ Elsen, and W.\ Bauwens, "Sobol' sensitivity analysis of a complex environmental model," Environmental Modelling & Software, vol. 26, no. 12, pp. 1515–1525, Dec. 2011, doi:`10.1016/j.envsoft.2011.08.010 `_. diff --git a/dev/docs/html/_sources/A2.3_logistic.rst b/dev/docs/html/_sources/A2.3_logistic.rst new file mode 100644 index 0000000..852f424 --- /dev/null +++ b/dev/docs/html/_sources/A2.3_logistic.rst @@ -0,0 +1,372 @@ +Logistic Regression Tutorial +**************************** + +.. note:: + + | Run the tutorial interactively: `Logistic Regression Tutorial `_. + | Please be aware that notebooks can take a couple minutes to launch. + | To run the notebooks yourself, download the files `here `_ and use these `requirements `_. + +Tutorial: Logistic Regression for Factor Mapping +================================================ + +This tutorial replicates a scenario discovery analysis performed in +`Hadjimichael et +al. (2020) `__. + +Background +---------- + +Planners in the the Upper Colorado River Basin (UCRB, shown in the +figure below) are seeking to understand the vulnerability of water users +to uncertainties stemming from climate change, population growth and +water policy changes. The UCRB spans 25,682 km2 in western Colorado and +is home to approximately 300,000 residents and 1,012 km2 of irrigated +land. Several thousand irrigation ditches divert water from the main +river and its tributaties for irrigation (shown as small black dots in +the figure). Transmountain diversions of approximately 567,400,000 m3 +per year are exported for irrigation, industrial and municipal uses in +northern and eastern Colorado, serving the major population centers of +Denver and Colorado Springs. These diversions are carried through +tunnels, shown as large black dots in the figure. + +.. image:: _static/basin_map.png + +An important planning consideration is the water rights of each user, +defined by seniority across all water uses (irrigation diversions, +transboundary diversions, power plants etc.) in the basin. To assess the +vulnerability of users with varying degrees of water rights seniority, +planners simulate the system across an ensemble of scenarios using the +state of Colorado’s StateMod platform. The model simulates streamflow, +diversions, instream demands, and reservoir operations. + +Hadjimichael et al. 
(2020) employ an exploratory analysis by simulating +a large ensemble of plausible scenarios using StateMod and then +identifying consequential decision-relevant combinations of uncertain +factors, termed scenario discovery. Focusing on decision-relevant +metrics (metrics that are important to the user, the scenario discovery +is applied to the water shortages experienced by each individual user +(i.e., not on a single basin-wide or sector-wide metric). For this +training example, we’ll be performing scenario discovery for three +different water users: two irrigation users and one municipal user. + +Let’s get started! +------------------ + +In this tutorial, we will be loading in data that has been produced in +Hadjimichael et al. (2020). Before we start our analysis, we’ll load the +relevant Python libraries, example data, and information for the three +users. + +.. code:: ipython3 + + #import libraries + import msdbook + import numpy as np + import pandas as pd + import matplotlib as mpl + import matplotlib.pyplot as plt + + # load example data from Hadjimichael et al. (2020) + msdbook.install_package_data() + + # Select the IDs for the three users that we will perform the analysis for + all_IDs = ['7000550','7200799','3704614'] + usernames = ['Medium seniority irrigation', + 'Low seniority irrigation', + 'Transbasin municipal diversion'] + nStructures = len(all_IDs) + + + +.. parsed-literal:: + + Downloading example data for msdbook version 0.1.5... + Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/uncertain_params_bounds.txt + Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/sa_metric_s1.npy + Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/sa_vary_delta.npy + Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/sa_by_mth_s1.npy + Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/solutions.resultfile + Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/3704614_heatmap.npy + Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/LHsamples_original_1000.txt + Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/3704614_pseudo_r_scores.csv + Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/param_values.csv + Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/sa_by_yr_s1.npy + Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/sa_by_mth_delta.npy + Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/7000550_pseudo_r_scores.csv + Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/collapse_days.csv + Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/hymod_params_256samples.npy + Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/sa_vary_s1.npy + Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/7000550_heatmap.npy + Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/7200799_heatmap.npy + Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/sa_by_yr_delta.npy + Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/7200799_pseudo_r_scores.csv + Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/LeafCatch.csv + Unzipped: 
/srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/hymod_simulations_256samples.csv + Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/Robustness.txt + + +Step 1: Load Latin hypercube sample and set up problem +------------------------------------------------------ + +To examine regional vulnerability, we generate an ensemble of plausible +future states of the world (SOWs) using Latin Hypercube Sampling. For +this tutorial, we’ll load a file containing 1,000 samples across 14 +parameters. The sampled parameters encompass plausible changes to the +future state of the basin, including changes to hydrology, water demands +(irrigation, municipal & industry, transbasin), and institutional and +environmental factors (environmental flows, reservoir storage, operation +of the Shoshone Power Plant). These samples are taken from ranges +identified in ``param_bounds``. Below we load in the 1000 samples, the +range of values that the samples can take for each parameter, and the +parameter names. More information on what each parameter constitutes can +be found in Table 1 of Hadjimichael et al., 2020. + +.. code:: ipython3 + + #Identify the bounds for each of the 14 parameters + param_bounds = msdbook.load_basin_param_bounds() + + #Load in the parameter samples + LHsamples = msdbook.load_lhs_basin_sample() + + #Create an array of the parameter names + param_names=['Irrigation demand multiplier','Reservoir loss','Transbasin demand multiplier', + 'Municipal & industrial multiplier', 'Shoshone','Environmental flows', + 'Evaporation change','Mean dry flow','Dry flow variance', + 'Mean wet flow','Wet flow variance','Dry-dry probability', + 'Wet-wet probability', 'Snowmelt shift'] + + +Step 2: Define decision-relevant metrics for illustration +--------------------------------------------------------- + +Scenario discovery attempts to identify parametric regions that lead to +‘success’ and ‘failure’. For this demonstration we’ll be defining +‘success’ as states of the world where a shortage level doesn’t exceed +its historical frequency. + +Step 3: Run the logistic regression +----------------------------------- + +Logistic regression estimates the probability that a future SOW will be +classified as a success or failure given a set of performance criteria. +A logistic regression model is defined by: + +.. math:: ln \bigg (\frac{p_i}{1-p_i} \bigg ) = X^T_i \beta + +where :math:`p_i` is the probability the performance in the +:math:`i^{th}` SOW will be classified as a success, :math:`X_i` is the +vector of covariates describing the :math:`i^{th}` SOW, and +:math:`\beta` is the vector of coefficients describing the relationship +between the covariates and the response, which here will be estimated +using maximum likelihood estimation. + +A logistic regression model was fit to the ensemble of SOWs using the +performance criteria defined in step 2. Logistic regression modeling was +conducted using the `Statsmodel +Python `__ package. The +data required for the full analysis is too large to include in this +tutorial, but results can be found in the data file loaded below. + +The results files contain the occurence of different shortage frequency +and magnitude combinations under the experiment, in increments of 10, +between 0 and 100. These combinations (100 for each user) are +alternative decision-relevant metrics that can be used for scenario +discovery. + +.. 
code:: ipython3

    # Set arrays for shortage frequencies and magnitudes
    frequencies = np.arange(10, 110, 10)
    magnitudes = np.arange(10, 110, 10)
    realizations = 10

    # Load performance and pseudo r scores for each of the users
    results = [msdbook.load_user_heatmap_array(all_IDs[i]) / 100 for i in range(len(all_IDs))]


Step 4: Factor ranking
----------------------

To rank the importance of each uncertain factor, we utilize McFadden's
pseudo-R2, a measure that quantifies the improvement of the model when
utilizing each given predictor as compared to prediction using the mean
of the data set:

.. math:: R^2_{McFadden}=1-\frac{\ln \hat{L}(M_{full})}{\ln \hat{L}(M_{intercept})}

Here :math:`\ln \hat{L}(M_{full})` is the log-likelihood of the full
model (including the predictor) and :math:`\ln \hat{L}(M_{intercept})` is
the log-likelihood of the intercept model (which predicts the mean
probability of success across all SOWs).

Higher values of McFadden's pseudo-R2 indicate higher factor importance:
as the likelihood of the full model improves, the ratio of the two
log-likelihoods approaches zero and the pseudo-R2 approaches one.

.. code:: ipython3

    #Load the pseudo-R^2 scores
    scores = [msdbook.load_user_pseudo_scores(all_IDs[i]) for i in range(len(all_IDs))]

    # Select indices of frequency and magnitudes that will be used for the visualization
    freq = [1,0,0]
    mag = [7,3,7]


Step 5: Draw factor maps
------------------------

The McFadden's pseudo-R2 scores files contain preliminary logistic
regression results on parameter importance for each of these
combinations. Using these pseudo-R2 scores we will identify the two most
important factors for each metric, which we'll use to generate the final
scenario discovery maps (note: there may be more than two important
factors for each user, but here we will demonstrate by mapping two).

..
code:: ipython3 + + # setup figure + fig, axes = plt.subplots(3,1, figsize=(6,18), tight_layout=True) + fig.patch.set_facecolor('white') + + for i in range(len(axes.flat)): + + ax = axes.flat[i] + + allSOWsperformance = results[i] + all_pseudo_r_scores = scores[i] + + # construct dataframe + dta = pd.DataFrame(data=np.repeat(LHsamples, realizations, axis = 0), columns=param_names) + dta['Success'] = allSOWsperformance[freq[i],mag[i],:] + + pseudo_r_scores = all_pseudo_r_scores[str(frequencies[freq[i]])+'yrs_'+str(magnitudes[mag[i]])+'prc'].values + top_predictors = np.argsort(pseudo_r_scores)[::-1][:2] #Sort scores and pick top 2 predictors + + # define color map for dots representing SOWs in which the policy + # succeeds (light blue) and fails (dark red) + dot_cmap = mpl.colors.ListedColormap(np.array([[227,26,28],[166,206,227]])/255.0) + + # define color map for probability contours + contour_cmap = mpl.cm.get_cmap('RdBu') + + # define probability contours + contour_levels = np.arange(0.0, 1.05,0.1) + + # define base values of the predictors + SOW_values = np.array([1,1,1,1,0,0,1,1,1,1,1,0,0,0]) # default parameter values for base SOW + base = SOW_values[top_predictors] + ranges = param_bounds[top_predictors] + + # define grid of x (1st predictor), and y (2nd predictor) dimensions + # to plot contour map over + xgrid = np.arange(param_bounds[top_predictors[0]][0], + param_bounds[top_predictors[0]][1], np.around((ranges[0][1]-ranges[0][0])/500,decimals=4)) + ygrid = np.arange(param_bounds[top_predictors[1]][0], + param_bounds[top_predictors[1]][1], np.around((ranges[1][1]-ranges[1][0])/500,decimals=4)) + all_predictors = [ dta.columns.tolist()[i] for i in top_predictors] + dta['Interaction'] = dta[all_predictors[0]]*dta[all_predictors[1]] + + # logistic regression here + predictor_list = [all_predictors[i] for i in [0,1]] + result = msdbook.fit_logit(dta, predictor_list) + + # plot contour map + contourset = msdbook.plot_contour_map(ax, result, dta, contour_cmap, + dot_cmap, contour_levels, xgrid, + ygrid, all_predictors[0], all_predictors[1], base) + + ax.set_title(usernames[i]) + + # set up colorbar + cbar_ax = fig.add_axes([0.98, 0.15, 0.05, 0.7]) + cbar = fig.colorbar(contourset, cax=cbar_ax) + cbar_ax.set_ylabel('Probability of Success', fontsize=16) + cbar_ax.tick_params(axis='y', which='major', labelsize=12) + + + +.. parsed-literal:: + + /srv/conda/envs/notebook/lib/python3.7/site-packages/statsmodels/base/model.py:127: ValueWarning: unknown kwargs ['disp'] + warnings.warn(msg, ValueWarning) + + +.. parsed-literal:: + + Optimization terminated successfully. + Current function value: 0.378619 + Iterations 8 + Optimization terminated successfully. + Current function value: 0.397285 + Iterations 8 + Optimization terminated successfully. + Current function value: 0.377323 + Iterations 8 + + + +.. image:: _static/notebook_logistic_output_11_1.png + + +The figure above demonstrates how different combinations of the +uncertain factors lead to success or failure in different states of the +world, which are denoted by the blue and red dots respectively. The +probability of success and failure are further denoted by the contours +in the figure. Several insights can be drawn from this figure. + +First, using metrics chosen to be decision-relevant (specific to each +user) causes different factors to be identified as most important by +this scenario-discovery exercise (the x- and y-axes for each of the +subplots). 
In other words, depending on what the decision makers of this +system want to prioritize they might choose to monitor different +uncertain factors to track performance. + +Second, in the top panel, the two identified factors appear to also have +an interactive effect on the metric used (shortages of a certain level +and frequency in this example). In terms of scenario discovery, the +Patient Rule Induction Method (PRIM) or Classification And Regression +Trees (CART) would not be able to delineate this non-linear space and +would therefore misclassify parameter combinations as ‘desirable’ when +they were in fact undesirable, and vice versa. + +Lastly, logistic regression also produces contours of probability of +success, i.e. different factor-value combinations are assigned different +probabilities that a shortage level will be exceeded. This allows the +decision makers to evaluate these insights while considering their risk +aversion. + +Tips to Apply Scenario Discovery to Your Own Problem +---------------------------------------------------- + +In this tutorial, we demonstrated how to perform a scenario discovery +analysis for three different users in the UCRB. The analysis allowed us +to determine which parameters the users would be most affected by and to +visualize how different ranges of these parameters lead to success and +failure for different users. This framework can be applicable to any +other application where it is of interest to characterize success and +failure based on uncertain parameter ranges. In order to apply the same +framework to your own problem: + +1. Choose sampling bounds for your parameters of interest, which will + represent uncertainties that characterize your system. +2. Generate samples for these parameters (this can be done using the + ``saltelli.sample`` function or externally). +3. Define what constitutes success and failure in your problem. In this + tutorial, success was defined based on not surpassing the historical + drought frequency. Choose a metric that is relevant to your problem + and decision-makers that might be involved. If your model involves an + optimization, you can also define metrics based on meeting certain + values of these objectives. +4. Run the parameter sets through your model and calculate success and + failure based on your metrics and across different users if + applicable. This step will allow you to create the scatter plot part + of the final figure. +5. If it is of interest, the contours on the figure can be created by + fitting the logistic regression model in a similiar manner as denoted + in Steps 3 and 5 of the tutorial. + + diff --git a/dev/docs/html/_sources/A2.4_hymod.rst b/dev/docs/html/_sources/A2.4_hymod.rst new file mode 100644 index 0000000..e847e8a --- /dev/null +++ b/dev/docs/html/_sources/A2.4_hymod.rst @@ -0,0 +1,1242 @@ +HYMOD Dynamics Tutorial +************************* + +.. note:: + + | Run the tutorial interactively: `HYMOD Notebook `_. + | Please be aware that notebooks can take a couple minutes to launch. + | To run the notebooks yourself, download the files `here `_ and use these `requirements `_. + +Tutorial: Sensitivity Analysis of the HYMOD Model +================================================= + +The purpose of this tutorial is to demonstrate the global sensitivity +analysis concepts and tools established in the Section 3.1 of the main +text of this eBook. 
This demonstration will highlight the central role
of design of experiments (Section 3.3) when implementing global
sensitivity analysis tools described in Section 3.4.

We'll explore these tools and concepts using the HYdrological MODel
(HYMOD), a rainfall-runoff model developed and used for river flow
forecasting. HYMOD was chosen for demonstration because its purpose is
to abstract highly complex and non-linear systems. The methods
demonstrated in this tutorial can be applied to numerical models that
simulate other complex non-linear systems.

This tutorial will first introduce HYMOD and use it to simulate
streamflows in a river basin. Next, we'll employ sensitivity analysis
concepts described in Section 3 of the main text to examine how values
of HYMOD's parameters impact streamflow predictions. We'll then explore
how the effects of these parameters may change over time using
time-varying sensitivity analysis. Finally, we'll demonstrate concepts
presented in Chapter 7 through two ensemble-based methods of uncertainty
quantification - Generalized Likelihood Uncertainty Estimation (GLUE)
and Pre-Calibration.

The tutorial includes the following steps:

1. Introduction to HYMOD
^^^^^^^^^^^^^^^^^^^^^^^^

`1.1 - Introduction to a simple hydrologic model (HYMOD) <#hymod>`__

`1.2 - Input Data <#inputs>`__

`1.3 - Running a basic simulation <#baseline>`__

`1.4 - Model outputs <#outputs>`__

2. Global Sensitivity Analysis
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

`2.1 - Design of Experiments <#sensitivity>`__

`2.2 - Sensitivity analysis for one output <#sametrics>`__

`2.3 - Sensitivity analysis across multiple outputs <#diffperformance>`__

`2.4 - Time-varying sensitivity analysis <#time>`__

1 - Introduction to HYMOD
=========================

.. _hymod:

1.1 Overview

HYMOD is a hydrologic model (rainfall-runoff model) that simulates key
hydrologic fluxes such as infiltration, streamflow and
evapotranspiration. The model was originally developed and used for
river flow forecasting, but it has also been used to explore
different sensitivity analysis (e.g., `Herman et al.,
2013 `__),
uncertainty quantification (e.g., `Smith et al.,
2008 `__),
and optimization (e.g., `Ye et al.,
2014 `__)
concepts.

HYMOD accepts two inputs - daily precipitation and daily potential
evapotranspiration (PET) - and generates predictions of daily streamflow.
HYMOD abstracts the highly non-linear process of runoff routing by
dividing the flow into two components: quick flow, representing
precipitation that quickly runs off the surface of the watershed into
the stream, and slow flow, which moves through the soil and takes much
longer to arrive at the stream.

To generate streamflow predictions, HYMOD first models vertical
processes within the watershed to determine how much water infiltrates
and evaporates from the soil at a given time step. It then determines
how much water should be partitioned into quick flow and slow flow
processes. Within each process it abstracts residence time (the time it
takes a unit volume of water to move through the watershed and into the
stream) using a series of "reservoirs", each with a calibrated residence
time.
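The "reservoir" abstraction can be illustrated with a toy linear tank, in which the outflow at each time step is a fixed fraction of storage (so the residence time scales with the inverse of that fraction). This is only a sketch for intuition, not HYMOD's actual implementation; the function and variable names below are illustrative.

.. code:: python

    import numpy as np

    def route_linear_tank(inflow, k, storage0=0.0):
        """Toy linear reservoir: each step releases a fixed fraction k of storage."""
        storage = storage0
        outflow = np.zeros(len(inflow))
        for t, q_in in enumerate(inflow):
            storage += q_in            # water entering the tank this time step
            outflow[t] = k * storage   # release proportional to storage
            storage -= outflow[t]      # carry the remainder to the next step
        return outflow

    # a pulse of runoff is released quickly for large k (short residence time)
    # and is attenuated and delayed for small k (long residence time)
    pulse = np.array([10.0, 0.0, 0.0, 0.0, 0.0, 0.0])
    print(route_linear_tank(pulse, k=0.5))
    print(route_linear_tank(pulse, k=0.05))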
+ +HYMOD’s representation of hydrologic processes are shown Figure 1 below +and controlled by the following parameters: + +:math:`H_{uz}`: the maximum water storage capacity of the soil (mm) + +:math:`B_{exp}`: parameters describing the degree of spatial variability +within the basin between 0 and Huz + +:math:`Alp`: Fraction of runoff contributing to quick flow + +:math:`K_q`: Quick flow residence time of linear infinite reservoir (the +Kq values of all three linear reservoirs are the same) + +:math:`K_s`: Slow flow residence time of linear infinite reservoir + + +.. image:: _static/hymod_schematic-DAVE.png + +Vertical Processes +~~~~~~~~~~~~~~~~~~ + +HYMOD models the fraction of water that is stored in the soil +:math:`(F(XH_{uz}))` using the following relationship: + +.. math:: F(XH_{uz}) = 1 - (1 - \frac{XH_{uz}}{H_{uz}})^{B} + +where :math:`XH_{uz}` is the water storage capacity of the soil; +:math:`H_{uz}` is the parameter describing basin maximum water +storage capacity (mm); and :math:`B` is the parameter describing the +degree of spatial variability within the basin. + +The portion of precipitation that exceeds the water storage capacity is +treated as runoff. + +Horizontal Processes +~~~~~~~~~~~~~~~~~~~~ + +To route runoff to streamflow, the excess runoff from the vertical +processes is split into quick flow and slow flow. The proportion of +runoff partitioned into quick flow and slow flow is determined by a +parameter :math:`Alp`, which ranges between 0 and 1. Quick flow is +routed through :math:`N` identical quick flow tanks :math:`Q1, Q2... QN` +(shown above as :math:`N=3`). The rate at which runoff moves through the +quick flow system is described by the residence time of the quick frlow +tanks, :math:`Kq` (day). Slow flow is routed through a parallel slow +flow tank and the rate at which slow flow is routed is described by the +slow flow residences time, :math:`Ks` (day). + +Citation: Wagener, T., Boyle, D. P., Lees, M. J., Wheater, H. S., Gupta, +H. V., & Sorooshian, S. (2001). A framework for development and +application of hydrological models. Hydrology and Earth System Sciences, +5(1), 13-26. + +.. _inputs: + +1.2 Input data + +The HYMOD model only requires precipitation and potential +evapotranspiration as inputs. For this example, we’ll run HYMOD using +data from the Leaf River, a humid catchment located north of Collins +Mississippi that has been widely used to explore HYMOD. The dataset also +includes daily observed runoff that we later use to evaluate the +performace of each sensitvity analysis sample set. + +In the following section of code, we’ll load the necessary python +libraries and read in the input file. For this exercise we’ll only use +the first eleven years of data. The first five rows of the input dataset +are printed to show what they look like: + +.. code:: ipython3 + + import msdbook + + import numpy as np + import pandas as pd + import seaborn as sns + + from sklearn import metrics + from matplotlib import pyplot as plt + + # load example data + msdbook.install_package_data() + + # load the Leaf River HYMOD input file + leaf_data = msdbook.load_hymod_input_file() + + # extract the first eleven years of data + leaf_data = leaf_data.iloc[0:4015].copy() + + print('Leaf River Data structure:') + + # There are only three columns in the file including precipitation, potential evapotranspiration and streamflow + leaf_data.head() + + +.. parsed-literal:: + + Downloading example data for msdbook version 0.1.5... 
+ Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/uncertain_params_bounds.txt + Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/sa_metric_s1.npy + Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/sa_vary_delta.npy + Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/sa_by_mth_s1.npy + Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/solutions.resultfile + Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/3704614_heatmap.npy + Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/LHsamples_original_1000.txt + Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/3704614_pseudo_r_scores.csv + Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/param_values.csv + Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/sa_by_yr_s1.npy + Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/sa_by_mth_delta.npy + Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/7000550_pseudo_r_scores.csv + Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/collapse_days.csv + Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/hymod_params_256samples.npy + Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/sa_vary_s1.npy + Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/7000550_heatmap.npy + Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/7200799_heatmap.npy + Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/sa_by_yr_delta.npy + Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/7200799_pseudo_r_scores.csv + Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/LeafCatch.csv + Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/hymod_simulations_256samples.csv + Unzipped: /srv/conda/envs/notebook/lib/python3.7/site-packages/msdbook/data/Robustness.txt + Leaf River Data structure: + + + + +.. raw:: html + +
       Precip  Pot_ET  Strmflw
    0     0.0    4.60     0.29
    1     0.0    4.31     0.24
    2     0.0    4.33     0.21
    3     0.0    4.78     0.19
    4     0.0    2.91     0.18
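Before plotting, it can help to sanity-check the ranges of these inputs; a quick optional look at the ``leaf_data`` frame loaded above (not part of the original notebook):

.. code:: python

    # optional check of the input and streamflow ranges before plotting
    print(leaf_data[['Precip', 'Pot_ET', 'Strmflw']].describe())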
+ + + +To visualize catchment hydrology, streamflow and precipitation data are +usually plotted together as a combined hydrograph (streamflow ) and +hyetograph (rainfall, from Greek.hyetos, “rain”). Streamflow is plotted +as a time series, while rainfall is shown as an inverted bar plot along +the top of the graph. Streamflow labels are shown on the left y-axis, +while rainfall labels are shown on the right y-axis. + +.. code:: ipython3 + + # make an axis for the hydrograph + fig, strmflw_ax = plt.subplots(figsize=[12,6]) + strmflw_ax.set_ylim([0, 50]) + + #make a second y-axis for the hyetograph + precip_ax = strmflw_ax.twinx() + precip_ax.set_ylim([0, 200]) + precip_ax.invert_yaxis() + + precip = leaf_data['Precip'] + strmflw_ax.plot(range(0, len(leaf_data['Precip'])), leaf_data['Strmflw'], color='lightcoral') + strmflw_ax.set_ylabel('Streamflow (mm/day)') + + precip_ax.bar(range(0, len(leaf_data['Precip'])), leaf_data['Precip'], width=2) + precip_ax.set_ylabel('Rainfall (mm/day)') + precip_ax.legend(['Precip'], loc='center right') + strmflw_ax.legend(['Streamflow'],bbox_to_anchor=(1, 0.48)) + + + + + +.. parsed-literal:: + + + + + + +.. image:: _static/hymod1.png + +.. _baseline: + +1.3 Running a Baseline Model Simulation + +We’ll start our experiment by running HYMOD using its default +parameters. + +.. code:: ipython3 + + # assign input parameters to generate a baseline simulated streamflow + Nq = 3 # Number of quickflow routing tanks + Kq = 0.5 # Quickflow routing tanks' rate parameter + Ks = 0.001 # Slowflow routing tank's rate parameter + Alp = 0.5 # Quick/slow split parameter + Huz = 100 # Maximum height of soil moisture accounting tank + B = 1.0 # Scaled distribution function shape parameter + + # Note that the number of years is 11. One year of model warm-up and ten years are used for actual simulation + model = msdbook.hymod(Nq, Kq, Ks, Alp, Huz, B, leaf_data, ndays=4015) + + +.. _outputs: + +1.4 Model Outputs + +Model outputs include actual evapotranspiration, quick and fast +streamflow, and combined runoff. In this tutorial we focus on the total +daily runoff, QQ (:math:`m^3/s`). We can use the following script to +plot simulated streamflow against observed streamflow. + +.. tip:: View the source code used to create this plot here: `plot_observed_vs_simulated_streamflow `_ + +.. code:: ipython3 + + ax = msdbook.plot_observed_vs_simulated_streamflow(df=leaf_data, hymod_dict=model) + + + +.. image:: _static/hymod2.png + + + +So how does our model perform? We can investigate model performance +across several metrics: + +1: Mean Absolute Error (MAE); MAE conveys how the model performs on +average across the 10 year simulation period, with smaller values +indicating better performance. The absolute value is taken so that +positive and negative errors do not cancel each other out. + +.. math:: MAE = \frac{1}{N}\sum_{t=0}^N\left\lvert Q_{sim,t}-Q_{obs,t}\right\rvert + +2: Root Mean Square Error (RMSE); RMSE is sum of square errors across +the 10 year simulation period. RMSE is sensitive to large errors between +the historical record and the simulated flows, and thus is useful for +highlighting the model’s ability to capture of extreme flood events. + +.. math:: RMSE = \sqrt{\frac{1}{N}\sum_{t=1}^{N}(Q_{sim,t}-Q_{obs,t})^2} + +3: Log-Root Mean Square Error (Log(RMSE)) LOG(RMSE) focuses on model +performance during low-flow events. + +.. math:: LOG(RMSE) = log(RMSE) + +.. 
code:: ipython3

    mae = np.mean(abs(leaf_data['Strmflw'] - model['Q']))
    mse = metrics.mean_squared_error(model['Q'], leaf_data['Strmflw'])
    rmse = mse**(1/2)
    log_rmse = np.log10(rmse)

    print('MAE: ' + str(round(mae, 4)) + '\nRMSE: ' + str(round(rmse, 4)) + '\nLOG(RMSE): ' + str(round(log_rmse, 4)))


.. parsed-literal::

    MAE: 1.0787
    RMSE: 2.0918
    LOG(RMSE): 0.3205


The error metrics show that HYMOD performs reasonably well: the MAE is
around 1 :math:`m^3/s`, the RMSE is on the order of 10% of the largest
observed streamflow, and the LOG(RMSE) is fairly low.

2 - Global Sensitivity Analysis
===============================

.. _sensitivity:

2.1 Experimental Design and Setup

Now we'll examine how sensitive streamflow simulations generated by
HYMOD are to the model's input parameters. We'll perform global
sensitivity analysis (see Section 3.1 of the main text) using the SALib
Python library.

.. code:: ipython3

    from SALib.sample import saltelli
    from SALib.analyze import sobol
    from SALib.analyze import delta

A first and critical step when conducting sensitivity analysis is
determining the experimental design (see Design of Experiments, Section
3.4 of the main text). Our experimental design involves defining the
uncertainties that we'll be examining, the output of interest, the
ranges of each uncertainty that will be explored, and the strategy for
sampling the uncertainty space.

For this experiment we'll explore the five parameters highlighted in
Figure 1, drawing their ranges from existing literature on the model
(e.g., Herman et al., 2013). We'll use Sobol sampling, a quasi-random
sampling approach based on low-discrepancy sequences, to sample the
uncertainty space (Section 3.3.4).

In this demonstration we'll utilize Sobol Sensitivity Analysis, a
variance-based method (Section 3.4.5).

To explore HYMOD's behavior, we'll examine the sensitivity of four model
outputs to input parameters: 1) predicted flow, 2) Mean Absolute Error
(compared with the calibration data set), 3) Root Mean Square Error, and
4) Log Root Mean Square Error.

This analysis will employ SALib, the same Python implementation used in
the Sobol SA tutorial above.

To start our analysis, we'll create a dictionary that describes our
model uncertainties and their ranges; this dictionary is named
"problem_hymod" (SALib refers to these dictionaries as "problems").

.. code:: ipython3

    problem_hymod = {
        'num_vars': 5,
        'names': ['Kq', 'Ks', 'Alp', 'Huz', 'B'],
        'bounds': [[0.1, 1], # Kq
                   [0, 0.1], # Ks
                   [0, 1], # Alp
                   [0.1, 500], # Huz
                   [0, 1.9]] # B
    }

After defining our uncertainties and ranges, we'll use SALib to sample
the uncertainty space and run the model for each of the sample sets. We
will load a sample that has already been created (``param_values_hymod``)
for demonstration purposes. For HYMOD, the literature recommends running
at least N = 10,000 samples; however, to keep this demonstration easy to
run, we use only N = 256 Sobol samples of the uncertainties. To generate
accurate approximations of second-order sensitivity indices, SALib
generates N*(2k+2) sets of samples, where N = 256 and k = 5 (the number
of uncertainties). For the math behind why this is needed, see Saltelli,
A. (2002). Making best use of model evaluations to compute sensitivity
indices. Computer Physics Communications 145, 280-297,
https://doi.org/10.1016/S0010-4655(02)00280-1.

The actual model simulation takes an extended period of time, so we also
load the simulation data from a previous run.
The following demonstrates how +to conduct this analysis: + +.. code:: python + + + # generate 256 samples. + param_values_hymod = saltelli.sample(problem_hymod, 256) + + # dictionary to store outputs in + d_outputs = {} + + # run simulation for each parameter sample + for i in range(0, len(param_values_hymod)): + + # run model for each sensitivity analysis parameter sets + hymod_output = msdbook.hymod(Nq, + param_values_hymod[i, 0], + param_values_hymod[i, 1], + param_values_hymod[i, 2], + param_values_hymod[i, 3], + param_values_hymod[i, 4], + leaf_data, + ndays=4015) + + # store the simulated total flow discharge + d_outputs[f"Q{i}"] = hymod_output["Q"] + + + Q_df_bw = pd.DataFrame(d_outputs) + +.. code:: ipython3 + + # load previously generated parameter values + param_values_hymod = msdbook.load_hymod_params() + + # number of samples + n_samples = len(param_values_hymod) + + # load previously generated hymod simulated outputs + Q_df_bw = msdbook.load_hymod_simulation() + + # column names of each sample simulation number + sample_column_names = [i for i in Q_df_bw.columns if i[0] == 'Q'] + + +Running HYMOD - Model Warm-up +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +A hydrological model such as HYMOD usually includes ordinary +differential equations that are sensitive to their initial condition. +They also have components in their underlying formulation that have long +memory such that prior time steps can affect their current simulations. +For example, soil moisture or groundwater can hold water for a long time +and therefore they are often considered to exhibit a long memory. This +can affect the partitioning of water to runoff and infiltration, while +also controlling the generation of base flow. Therefore, it is important +to have a reasonable initial value for them. To achieve this, +hydrologists usually extend their simulation period and after the +simulations, they remove that extended time period that has unreasonable +groundwater or surface water values. This time period is called the +warm-up time period. + +Here we extended our simulation for one year (from 10 years to 11 years) +and we removed the first year of simulation, therefore our warm-up +period is one year. + +.. code:: ipython3 + + # exclude the first year of simulation from the simulations and reset the index + Q_df = Q_df_bw.iloc[365:4015].copy().reset_index(drop=True) + + # exclude the first year of the input data and reset the index + leaf_data = leaf_data.iloc[365:4015].copy().reset_index(drop=True) + + +Now that HYMOD has been warmed up, we’ll examine how HYMOD’s streamflow +outputs vary under different sample sets, and compare them with the +observed streamflow. + +.. code:: ipython3 + + # add date columns to our simulation data frame; for this data our start date is 1/1/2000 + date_ts = pd.date_range(start='1/1/2000', periods=3650, freq='D') + Q_df['date'] = date_ts + Q_df['year'] = date_ts.year + Q_df['month'] = date_ts.month + Q_df['day'] = date_ts.day + + # aggregate the simulated observed streamflow to monthly mean + df_sim_mth_mean = Q_df.groupby(['year', 'month'])[sample_column_names].mean() + + # do the same for the observed data + date_ts = pd.date_range(start='1/1/2000', periods=len(leaf_data), freq='D') + leaf_data['date'] = date_ts + leaf_data['year'] = date_ts.year + leaf_data['month'] = date_ts.month + leaf_data['day'] = date_ts.day + + # aggregate the daily observed streamflow to monthly mean + df_obs_mth_mean = leaf_data.groupby(['year', 'month']).mean() + + +.. 
tip:: View the source code used to create this plot here: `plot_observed_vs_sensitivity_streamflow `_ + +.. code:: ipython3 + + ax = msdbook.plot_observed_vs_sensitivity_streamflow(df_obs=df_obs_mth_mean, + df_sim=df_sim_mth_mean) + + + + +.. image:: _static/hymod3.png + + + +.. _sametrics: + +2.2 Sensitivity of streamflows to model parameters + +Now we’ll examine how each of HYMOD’s parameters impact the variance of +simulated streamflows. Using SALib we’ll calculate the first order and +total order sensitivity indicies of each model parameter. The first +order sensitivity index measure’s the individual impact that a given +parameter has on the variance of the simulated streamflows. The total +order index measures the impact of a given parameter along with all +interactions that other parameters have with the given parameter on +simulated streamflows. + +We’ll start with an matrix, Y, which contains our simulated streamflows +for every uncertainty sample. We’ll then use the sobol.analyze function +from SALib to calculate the sensitivity indicies (Si). The arguments for +this function are the problem dictionary defined in part 2.2 of this +tutorial, and the matrix of simulated streamflows, Y. + +.. code:: ipython3 + + # overall aggregated indices + Y = Q_df[sample_column_names].mean().to_numpy() + + # Perform analysis + Si = sobol.analyze(problem_hymod, Y) + +Now we can examine our results, we’ll print the first order and total +order Si’s for each parameter, then visualize the results with bar plots + +.. code:: ipython3 + + print('First order indices = ', Si['S1']) + + print('Total order indicies = ', Si['ST']) + + sns.set_style('white') + fig = plt.figure(figsize=(8,4)) + ax1 = fig.add_subplot(121) + ax1.bar(np.arange(5), Si['S1']) + ax1.set_xticklabels(['','Kq', 'Ks', 'Alp', 'Huz', 'B']) + ax1.set_ylabel('First order Si') + ax1.set_ylim([0,1]) + + ax2 = fig.add_subplot(122) + ax2.bar(np.arange(5), Si['ST']) + ax2.set_xticklabels(['','Kq', 'Ks', 'Alp', 'Huz', 'B']) + ax2.set_ylabel('Total order Si') + ax2.set_ylim([0,1]) + + + +.. parsed-literal:: + + First order indices = [9.55550001e-05 7.49249463e-04 5.62386413e-04 7.03327551e-01 + 2.53701895e-01] + Total order indicies = [1.76174200e-06 1.63288175e-03 3.41378460e-04 6.88983864e-01 + 2.53922146e-01] + + +.. parsed-literal:: + + /srv/conda/envs/notebook/lib/python3.7/site-packages/ipykernel_launcher.py:9: UserWarning: FixedFormatter should only be used together with FixedLocator + if __name__ == '__main__': + /srv/conda/envs/notebook/lib/python3.7/site-packages/ipykernel_launcher.py:15: UserWarning: FixedFormatter should only be used together with FixedLocator + from ipykernel import kernelapp as app + + + + +.. parsed-literal:: + + (0.0, 1.0) + + + + +.. image:: _static/hymod4.png + + + +Our findings indicate that in this instance, the streamflow estimate +from HYMOD is highly sensitive to soil moisture parameters Huz and B and +hardly affected by the routing parameters. Notably, there is very little +interactions between parameters causing the total order indicies to be +nearly identical to the first order indicies. + + +.. _diffperformance: + +2.3 How do different performance metrics affect the results of our +sensitivity analysis? + +Streamflow has many different properties. In this section, we discuss +how the selection of metrics can lead to fundamentally different +sensitivity analysis results. 
For example, one can only focus on +aggregated streamflow metrics such as mean (what has been presented so +far), or only on extreme events such as drought or floods. + +Here we compare three different metrics: 1- Mean error (ME) 2- Root Mean +Square Error (RMSE) 3- Log-Root Mean Square Error (Log(RMSE)) + +Each of these metrics focuses on a specific attribute of streamflow. For +example, RMSE highlights the impacts of extreme flood events, while +LOG(RMSE) focuses on model performance during low-flow events. + +.. code:: ipython3 + + # calculate error metrics + mae = Q_df[sample_column_names].apply(lambda x: abs(x-leaf_data["Strmflw"]), axis=0) + mse = Q_df[sample_column_names].apply(lambda x: metrics.mean_squared_error(x, leaf_data["Strmflw"]), axis=0) + rmse = mse**(1/2) + + # add error metrics to a dictionary + d_metrics = {'MAE': mae.mean().values, + 'RMSE': rmse.values, + 'LOG[RMSE]': np.log10(rmse.values)} + + # convert to a dataframe + df_metrics_SA = pd.DataFrame(d_metrics) + + +We can use the following to calculate the SA indices for each metric and +visualize it. + +.. code:: ipython3 + + df_metric_s1_result = pd.DataFrame(np.zeros((3, 5)), columns=['Kq', 'Ks', 'Alp', 'Huz', 'B']) + df_metric_sT_result = pd.DataFrame(np.zeros((3, 5)), columns=['Kq', 'Ks', 'Alp', 'Huz', 'B']) + + # conduct sensitivity analysis for each metric + for index, i in enumerate(d_metrics.keys()): + + # get the data as a numpy array for the target metric + Y = d_metrics[i] + + # use the metric to conduct SA + Si = sobol.analyze(problem_hymod, Y, print_to_console=False) + + # add the sensitivity indices to the output data frame + df_metric_s1_result.iloc[index, :] = Si['S1'] + df_metric_sT_result.iloc[index, :] = Si['ST'] + +.. code:: ipython3 + + # create seaborn heatmap with required labels + fig = plt.figure(figsize=(12,4)) + ax1 = fig.add_subplot(121) + # labels for y-axis + y_axis_labels = ['Mean Absoulte Error', 'RSME', 'Log(RMSE)'] + + # plot heatmap + ax1 = sns.heatmap(df_metric_s1_result, yticklabels=y_axis_labels, annot=True, cmap='inferno_r', cbar_kws={'label': 'Si'}, cbar=False) + ax1.figure.axes[-1].yaxis.label.set_size(14) + ax1.set_title('First Order Sensitivity') + + ax2 = fig.add_subplot(122) + ax2 = sns.heatmap(df_metric_sT_result, yticklabels=y_axis_labels, annot=True, cmap='inferno_r', cbar_kws={'label': 'Si'}) + ax2.figure.axes[-1].yaxis.label.set_size(14) + ax2.set_title('Total Order Sensitivity') + + + + + +.. parsed-literal:: + + Text(0.5, 1.0, 'Total Order Sensitivity') + + + + +.. image:: _static/hymod5.png + + + +The first order sensitivity indicies indicate that HYMOD’s sensitivity +to its parameters is different depending on how its output is measured. +Unsurprisingly, the mean absolute error is highly sensitive to the soil +moisture accounting parameters Huz and B, just like the overall +streamflow predictions above. However, when we examine RMSE and +log(RMSE), the routing parameters Alp become sensitive, and the +sensitivity to parameter B is reduced. As described above, RMSE and +LOG(RMSE) respond to model performance in high-flow and low flow periods +respectively. Our results indicate that for these flow regimes Alp, the +parameter that governs the split between quick and slow flow is an +important factor. While still the parameter with the highest most effect +on all three measures, Huz is much less influential for RMSE and +LOG(RMSE) than it is for MAE. + +The total order sensitivity indicies review a different, more complex +story. 
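One rough way to quantify those interactions, using the ``df_metric_s1_result`` and ``df_metric_sT_result`` dataframes computed above, is to subtract the first-order indices from the total-order indices; values near zero indicate mostly additive effects (a sketch, not part of the original notebook):

.. code:: python

    # approximate interaction share per parameter and metric as the difference
    # between total-order and first-order Sobol indices
    df_interactions = df_metric_sT_result - df_metric_s1_result
    df_interactions.index = ['MAE', 'RMSE', 'LOG[RMSE]']
    print(df_interactions.round(2))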
While the MAE sensitivity is relatively governed by first order +effects (like the streamflow predictions above), the RMSE and LOG(RMSE) +error metrics show significant interactions. Alp has the highest total +order sensitivity for RMSE and is eqal to Huz for Log(RMSE). Kq, which +has a relatively low first order sensitivity index, shows strong +contribution to variance when interactions are taken into account. + +Radial convergence plots are a helpful way to visualize the interactions +between parameters. These plots array the model parameters in a circle +and plot the first order, total order and second order Sobol sensitivity +indices for each parameter. The first order sensitivity is shown as the +size of a closed circle, the total order as the size of a larger open +circle and the second order as the thickness of a line connecting two +parameters. Below is an example of a radial convergence plot for the +LOG(RMSE) measure. The plot indicates strong interactions between the +Huz and Alp parameters, as well as Alp and Kq. There is also an +interaction between Alp and Ks. + +.. code:: ipython3 + + import numpy as np + import itertools + import seaborn as sns + import math + sns.set_style('whitegrid', {'axes_linewidth': 0, 'axes.edgecolor': 'white'}) + + def is_significant(value, confidence_interval, threshold="conf"): + if threshold == "conf": + return value - abs(confidence_interval) > 0 + else: + return value - abs(float(threshold)) > 0 + + def grouped_radial(SAresults, parameters, radSc=2.0, scaling=1, widthSc=0.5, STthick=1, varNameMult=1.3, colors=None, groups=None, gpNameMult=1.5, threshold="conf"): + # Derived from https://github.com/calvinwhealton/SensitivityAnalysisPlots + fig, ax = plt.subplots(1, 1) + color_map = {} + + # initialize parameters and colors + if groups is None: + + if colors is None: + colors = ["k"] + + for i, parameter in enumerate(parameters): + color_map[parameter] = colors[i % len(colors)] + else: + if colors is None: + colors = sns.color_palette("deep", max(3, len(groups))) + + for i, key in enumerate(groups.keys()): + #parameters.extend(groups[key]) + + for parameter in groups[key]: + color_map[parameter] = colors[i % len(colors)] + + n = len(parameters) + angles = radSc*math.pi*np.arange(0, n)/n + x = radSc*np.cos(angles) + y = radSc*np.sin(angles) + + # plot second-order indices + for i, j in itertools.combinations(range(n), 2): + #key1 = parameters[i] + #key2 = parameters[j] + + if is_significant(SAresults["S2"][i][j], SAresults["S2_conf"][i][j], threshold): + angle = math.atan((y[j]-y[i])/(x[j]-x[i])) + + if y[j]-y[i] < 0: + angle += math.pi + + line_hw = scaling*(max(0, SAresults["S2"][i][j])**widthSc)/2 + + coords = np.empty((4, 2)) + coords[0, 0] = x[i] - line_hw*math.sin(angle) + coords[1, 0] = x[i] + line_hw*math.sin(angle) + coords[2, 0] = x[j] + line_hw*math.sin(angle) + coords[3, 0] = x[j] - line_hw*math.sin(angle) + coords[0, 1] = y[i] + line_hw*math.cos(angle) + coords[1, 1] = y[i] - line_hw*math.cos(angle) + coords[2, 1] = y[j] - line_hw*math.cos(angle) + coords[3, 1] = y[j] + line_hw*math.cos(angle) + + ax.add_artist(plt.Polygon(coords, color="0.75")) + + # plot total order indices + for i, key in enumerate(parameters): + if is_significant(SAresults["ST"][i], SAresults["ST_conf"][i], threshold): + ax.add_artist(plt.Circle((x[i], y[i]), scaling*(SAresults["ST"][i]**widthSc)/2, color='w')) + ax.add_artist(plt.Circle((x[i], y[i]), scaling*(SAresults["ST"][i]**widthSc)/2, lw=STthick, color='0.4', fill=False)) + + # plot first-order indices + 
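        # filled circles: radius scales with S1; drawn only when the first-order index passes the significance check above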
for i, key in enumerate(parameters): + if is_significant(SAresults["S1"][i], SAresults["S1_conf"][i], threshold): + ax.add_artist(plt.Circle((x[i], y[i]), scaling*(SAresults["S1"][i]**widthSc)/2, color='0.4')) + + # add labels + for i, key in enumerate(parameters): + ax.text(varNameMult*x[i], varNameMult*y[i], key, ha='center', va='center', + rotation=angles[i]*360/(2*math.pi) - 90, + color=color_map[key]) + + if groups is not None: + for i, group in enumerate(groups.keys()): + print(group) + group_angle = np.mean([angles[j] for j in range(n) if parameters[j] in groups[group]]) + + ax.text(gpNameMult*radSc*math.cos(group_angle), gpNameMult*radSc*math.sin(group_angle), group, ha='center', va='center', + rotation=group_angle*360/(2*math.pi) - 90, + color=colors[i % len(colors)]) + + ax.set_facecolor('white') + ax.set_xticks([]) + ax.set_yticks([]) + plt.axis('equal') + plt.axis([-2*radSc, 2*radSc, -2*radSc, 2*radSc]) + #plt.show() + + + return fig + + # define groups for parameter uncertainties + groups={"Soil Moisture" : ["Huz", "B"], + "Routing" : ["Alp", "Kq", "Ks"]} + + + fig = grouped_radial(Si, ['Kq', 'Ks', 'Alp', 'Huz', 'B'], groups=groups, threshold=0.025) + + +.. parsed-literal:: + + Soil Moisture + Routing + + + +.. image:: _static/hymod6.png + + +2.4 Time-Varying Sensitivity Analysis + +.. _time: + +In section 2.5 we saw how performing sensitivity analysis on different +measurements of model output can yeild in different results on the +importance of each uncertain input. In this section we’ll examine how +performing this analysis over time can yeild additional insight into the +performance of HYMOD. We’ll first examine how model sensitivities vary +by month, then examine how they change across each year of the +simulation. + +For this demonstration, we’ll focus only on the monthly streamflow +predictions generated by HYMOD. + +2.4.1 Sensitivity analysis indices for each month +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. code:: ipython3 + + # aggregate simulated streamflow data to monthly time series + df_sim_by_mth_mean = Q_df.groupby('month')[sample_column_names].mean() + + # aggregate observed streamflow data to monthly time series + df_obs_by_mth_mean = leaf_data.groupby('month').mean() + + +We can use the following to calculate the SA indices for each month and +visualize it. Results are pre-loaded for efficiency. + +.. code:: python + + # set up dataframes to store outputs + df_mth_s1 = pd.DataFrame(np.zeros((12,5)), columns=['Kq', 'Ks', 'Alp', 'Huz', 'B']) + df_mth_delta = df_mth_s1.copy() + + # iterate through each month + for i in range(0, 12): + + # generate the simulation data + Y = df_sim_by_mth_mean.iloc[i, :].to_numpy() + + # run SA + Si = delta.analyze(problem_hymod, param_values_hymod, Y, print_to_console=False) + + # add to output dataframes + df_mth_s1.iloc[i, :] = np.maximum(Si['S1'], 0) + df_mth_delta.iloc[i, :] = np.maximum(Si['delta'], 0) + + # convert to arrays + arr_mth_s1 = df_mth_s1.values + arr_mth_delta = df_mth_delta.values + +First-order Indices +^^^^^^^^^^^^^^^^^^^ + +The following can be used to visualize the time-varying first-order +indices. The first order represents the direct impacts of a specific +parameter on model outputs. + +.. tip:: View the source code used to create this plot here: `plot_monthly_heatmap `_ + +.. 
code:: ipython3 + + # load previously ran data + arr_mth_delta, arr_mth_s1 = msdbook.load_hymod_monthly_simulations() + + # plot figure + ax, ax2 = msdbook.plot_monthly_heatmap(arr_sim=arr_mth_s1.T, + df_obs=df_obs_by_mth_mean, + title='First Order - Mean Monthly SA') + + + + +.. image:: _static/hymod7.png + + + +This figure demonstrates the first order sensitivity indices when the +streamflow data are aggregated by month. The purple line represents the +observed monthly discharge. The figure indicates that the first order +indices are highest for B and Huz across all months and lowest for Alp, +Ks, and Kq. Note that in the months with the highest flow, Ks becomes an +influential parameter. + +Total-order indices +^^^^^^^^^^^^^^^^^^^ + +We can also focus on the total order sensitivity index that includes +first-order SA indices and interactions between parameters + +.. code:: ipython3 + + # plot figure + ax, ax2 = msdbook.plot_monthly_heatmap(arr_sim=arr_mth_delta.T, + df_obs=df_obs_by_mth_mean, + title='Total Order - Mean monthly SA') + + + + +.. image:: _static/hymod8.png + + + +Notably, the total order sensitivity results are different than the +first order sensitivity results, which indicates that interactions +between the parameters (particularly in regards to routing parameters +:math:`Kq`, :math:`Ks`, and :math:`Alp`) contribute to changes in HYMOD +output. + +2.4.2 Annual sensitivity analysis indices +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. code:: ipython3 + + # group by year and get mean + df_sim_by_yr_mean = Q_df.groupby(['year'])[sample_column_names].mean() + + # group input data and get mean + df_obs_by_yr_mean = leaf_data.groupby(['year']).mean() + + +We can also calculate the sensitivity analysis indices for each +individual year. This will allow us to understand if model control +changes during different years. The following code first aggregates the +outputs to annual time steps, and then calculates the SA indices. + +.. code:: python + + # set up dataframes to store outputs + df_yr_s1 = pd.DataFrame(np.zeros((10, 5)), columns=['Kq', 'Ks', 'Alp', 'Huz', 'B']) + df_yr_delta = df_yr_s1.copy() + + # iterate through each year + for i in range(0, 10): + + # generate the simulation data + Y = df_sim_by_yr_mean.iloc[i, :].to_numpy() + + # run SA + Si = delta.analyze(problem_hymod, param_values_hymod, Y, print_to_console=False) + + # add to output dataframes + df_yr_s1.iloc[i, :] = np.maximum(Si['S1'], 0) + df_yr_delta.iloc[i, :] = np.maximum(Si['delta'], 0) + + # convert to arrays + arr_yr_s1 = df_mth_s1.values + arr_yr_delta = df_mth_delta.values + +First-order indices +^^^^^^^^^^^^^^^^^^^ + +.. tip:: View the source code used to create this plot here: `plot_annual_heatmap `_ + +.. code:: ipython3 + + # load previously ran data + arr_yr_delta, arr_yr_s1 = msdbook.load_hymod_annual_simulations() + + # plot figure + ax, ax2 = msdbook.plot_annual_heatmap(arr_sim=arr_yr_s1.T, + df_obs=df_obs_by_yr_mean, + title='First Order - Mean Annual SA') + + + + +.. image:: _static/hymod9.png + + + +The first order sensitivities at the annual scale are not unlike the +first order monthly sensitivities. Once again, sensitivities vary across +year and Huz and B are the most consequential parameters. + +Total-order indices +^^^^^^^^^^^^^^^^^^^ + +.. code:: ipython3 + + # plot figure + ax, ax2 = msdbook.plot_annual_heatmap(arr_sim=arr_yr_delta.T, + df_obs=df_obs_by_yr_mean, + title='Total Order - Mean Annual SA and Observed flow') + + + +.. 
image:: _static/hymod10.png
+
+Our results indicate that the sensitivity indices vary across years and
+that, once interactions are included, the Kq, Ks, and Alp parameters also
+impact the sensitivity of the streamflow output.
+
+2.4.3 Monthly time-varying sensitivity analysis
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Although time-varying sensitivity analysis (TVSA) at average monthly and
+average annual temporal resolutions is informative, TVSA is susceptible
+to the aggregation issue that we discussed earlier in section 3-2. To
+avoid this, we can further discretize our time domain and zoom in on
+individual months. This provides even more information about model
+behavior and the sensitivity of different parameters in different states
+of the system. The block of code below demonstrates how to implement the
+monthly TVSA.
+
+.. code:: python
+
+    # set up dataframes to store outputs
+    df_vary_s1 = pd.DataFrame(np.zeros((df_obs_mth_mean.shape[0], 5)),
+                              columns=['Kq', 'Ks', 'Alp', 'Huz', 'B'])
+
+    df_vary_delta = df_vary_s1.copy()
+
+    # iterate through each month
+    for i in range(0, df_obs_mth_mean.shape[0]):
+
+        # generate the simulation data
+        Y = df_sim_mth_mean.iloc[i, :].to_numpy()
+
+        # run SA
+        Si = delta.analyze(problem_hymod, param_values_hymod, Y, print_to_console=False)
+
+        # add to output dataframes
+        df_vary_s1.iloc[i, :] = np.maximum(Si['S1'], 0)
+        df_vary_delta.iloc[i, :] = np.maximum(Si['delta'], 0)
+
+    # convert to arrays
+    arr_vary_s1 = df_vary_s1.values
+    arr_vary_delta = df_vary_delta.values
+
+First-order indices
+^^^^^^^^^^^^^^^^^^^
+
+.. tip:: View the source code used to create this plot here: `plot_varying_heatmap `_
+
+.. code:: ipython3
+
+    # load in previously run data
+    arr_vary_delta, arr_vary_s1 = msdbook.load_hymod_varying_simulations()
+
+    # plot figure
+    ax, ax2 = msdbook.plot_varying_heatmap(arr_sim=arr_vary_s1.T,
+                                           df_obs=df_obs_mth_mean,
+                                           title='First Order - Time-Varying SA')
+
+.. image:: _static/hymod11.png
+
+Compared to the TVSA with aggregated streamflow, this figure suggests
+that Kq is indeed a relevant parameter for influencing streamflow output
+when individual months are considered.
+
+Total order - time-varying sensitivity analysis
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. code:: ipython3
+
+    # plot figure
+    ax, ax2 = msdbook.plot_varying_heatmap(arr_sim=arr_vary_delta.T,
+                                           df_obs=df_obs_mth_mean,
+                                           title='Total Order - Time-Varying SA')
+
+.. image:: _static/hymod12.png
+
+As above, the total order sensitivities further indicate the importance
+of Kq, which is not apparent when the output is aggregated.
+
+Tips to Apply this methodology to your own problem
+==================================================
+
+In this tutorial, we demonstrated how to use global sensitivity analysis
+to explore a complex, non-linear model. We showed how measuring
+sensitivity across multiple measures of model performance and temporal
+aggregations yields differing results about model sensitivity and
+behavior. While these results may seem contradictory, they provide useful
+insight into the behavior of HYMOD. Would we expect the same parameters
+to control high-flow and low-flow regimes within the model? Maybe,
+depending on the system, but also, maybe not. This analysis can provide
+insight into how the model responds to its input parameters, allowing us
+to compare the results to our expectations.
This
+may allow us to find problems with our initial assumptions, or call
+attention to model features that can be improved or expanded upon.
+Depending on the model and context, it may also yield insight into the
+workings of the underlying system.
+
+To run this tutorial on your own model you will need to:
+
+1. Design your experiment by choosing sampling bounds for your
+   parameters and setting up the problem dictionary as in step 2-2
+2. Choose the parameters of interest
+3. Generate samples using the saltelli.sample function. This step is
+   problem-dependent; note that the Sobol method can be computationally
+   intensive depending on the model being analyzed. More complex models
+   will be slower to run and will also require more samples to calculate
+   accurate estimates of Sobol indices. Once you complete this process,
+   pay attention to the confidence bounds on your sensitivity indices to
+   see whether you need to run more samples.
+4. Run the parameter sets through your model and record each of the
+   desired model outputs.
+5. Calculate the Sobol indices for each performance criterion. Now, the Y
+   will be a numpy array with your external model output and you will
+   need to include the parameter samples as an additional argument.
+6. Follow the procedure in step 2.6 to disaggregate performance across
+   time
diff --git a/dev/docs/html/_sources/A2.5_discovery.rst b/dev/docs/html/_sources/A2.5_discovery.rst
new file mode 100644
index 0000000..4487f45
--- /dev/null
+++ b/dev/docs/html/_sources/A2.5_discovery.rst
@@ -0,0 +1,384 @@
+Time-evolving scenario discovery for infrastructure pathways
+****************************************************************************************************
+
+.. note::
+
+    | Run the tutorial interactively: `Scenario Discovery Notebook `_.
+    | Please be aware that notebooks can take a couple minutes to launch.
+    | To run the notebooks yourself, download the files `here `_ and use these `requirements `_.
+
+
+Time-evolving scenario discovery for infrastructure pathways
+============================================================
+
+The purpose of this tutorial is to explore time-evolving vulnerability for systems that dynamically adapt to changing conditions. Using an example from water supply planning, we'll first examine how performance of a dynamic infrastructure pathway policy changes over time, then use factor mapping (main text Chapter 4.3) to understand which combinations of uncertainties generate vulnerability for two water utilities. Next, we'll perform factor prioritization (main text Chapter 4.3) to determine which uncertainties have the most influence on water supply performance. Finally, we'll provide an open platform to explore vulnerability across multiple measures of performance and different combinations of uncertainties.
+
+Background
+----------
+
+The Bedford-Greene metropolitan area (Figure 1) is a stylized water resources test case where two urban water utilities seek to develop an infrastructure investment and management strategy to confront growing demands and a changing climate. The utilities have agreed to jointly construct a new water treatment plant on Lake Classon, a large regional resource. Both utilities have also identified a set of individual infrastructure options to construct if necessary.
+
+.. figure:: _static/Map_small.png
+   :alt: Figure 1
+
+   Figure 1
+
+The utilities are formulating a cooperative and adaptive regional management strategy that uses a risk-of-failure (ROF) metric to trigger both short-term drought mitigation actions (water use restrictions and treated transfers between utilities) and long-term infrastructure investment decisions (shown in Figure 2a). Both utilities have specified a set of risk triggers and developed a construction order for available infrastructure options.
+
+The utilities have run a Monte Carlo simulation to evaluate how these policies respond to a wide array of future States Of the World (SOWs). Each SOW represents a different combination of thirteen uncertain system inputs, including demand growth rates, changes to streamflows, and financial variables. In this context, a fully specified SOW is composed of one sample of uncertain model inputs (e.g., one projection of demand growth rate coupled with one future streamflow scenario and one projection of future financial conditions). The water utilities used Latin Hypercube sampling (Chapter 3.3 of the main text) to develop an ensemble of 1,000 plausible future SOWs. The Monte Carlo simulation evaluates each candidate water supply infrastructure investment and management policy across all 1,000 SOWs, as shown in Figure 2b. For more details on the Monte Carlo sampling for this type of analysis, see Trindade et al. (2019).
+
+The ROF-based policies respond to each SOW by generating a unique infrastructure pathway - a sequence of infrastructure investment decisions over time. Infrastructure pathways over a set of 1,000 future SOWs are shown in Figure 2c. Infrastructure options are arrayed along the vertical axis and the sequence of infrastructure investments triggered by the policy is plotted as pathways over time. Since the adaptive rule system generates a unique infrastructure sequence for each scenario, Figure 2c summarizes the ensemble of pathways by clustering SOWs according to infrastructure intensity. Dark green lines represent SOWs where the utilities heavily invest in new infrastructure, light green lines represent SOWs with low infrastructure investment, and medium shaded lines represent moderate investment. The shading behind each pathway represents the frequency with which each infrastructure option was triggered over time across the sampled scenarios.
+
+.. figure:: _static/Policy_MonteCarlo_Pathways_small.png
+   :alt: Figure 2
+
+   Figure 2
+
+Evaluating Robustness over time
+-------------------------------
+
+The two water utilities are interested in maintaining both supply
+reliability and financial stability across the broadest set of plausible
+future SOWs. To measure the performance of the infrastructure pathway
+policy, they’ve defined five critical performance criteria:
+
+- Reliability > 99%
+- Restriction Frequency < 20%
+- Peak Financial Cost < 80% of annual revenue (a measure of debt
+  service spending)
+- Worst-case drought management cost < 10% of annual revenue (a measure
+  of unexpected drought costs)
+- Unit Cost of Expansion < 5 dollars/kgal
+
+To assess the robustness of the infrastructure pathway policy, the two
+utilities apply a satisficing metric, which measures the percentage of
+sampled SOWs where the pathway policy meets the performance criteria:
+
+:math:`R =\frac{1}{N}\sum_{j=1}^{N}\Lambda_{\theta,j}`
+
+Where :math:`\Lambda_{\theta,j}=`
+
+..
math:: + + \begin{cases} + 1, \quad \textrm{if}\ F(\theta)_{j}\leq \Phi_j \\ + 0, \quad \textrm{otherwise} + \end{cases} + +And :math:`\Phi` is a vector of performance criteria for utility +:math:`j`, :math:`\theta` is the portfolio and :math:`N` is the total +number of sampled SOWs. + +Below, we'll visualize how robustness for the two utilities evolves over the 45-year planning horizon. We'll assess robustness across three time periods, near-term (first 10 years), mid-term (22 years) and long term (45 years). + +We start by loading robustness values for the both utilities. These values are calculated by applying the robustness metric above across 2,000 simulated SOWs. To make this exercise computationally tractable, we've precomputed these values, which can be found in the files "short_term_robustness.csv", "mid_term_robustness.csv" and "long_term_robustness.csv". These values are calculated using the function "check_rdm_meet_criteria" within the helper functions. + +.. code:: ipython3 + + import numpy as np + from matplotlib import pyplot as plt + from functions.eBook_SD_helpers import check_rdm_meet_criteria, create_sd_input, plot_selected_tree_maps, get_factor_importances, open_exploration + import seaborn as sns + + # load Deeply uncertain factors + rdm_factors = np.loadtxt('data/DU_Factors.csv', delimiter= ',') + + sns.set() + short_term_robustness = np.loadtxt('data/short_term_robustness.csv', delimiter= ',') + mid_term_robustness = np.loadtxt('data/mid_term_robustness.csv', delimiter = ',') + long_term_robustness = np.loadtxt('data/long_term_robustness.csv', delimiter = ',') + + # plot robustness over time + fig =plt.figure(figsize=(9,3)) + plt.plot([10,22,45], [short_term_robustness[5]*100, mid_term_robustness[5]*100,long_term_robustness[5]*100], c='#B19CD9') + plt.plot([10, 22, 45], [short_term_robustness[11]*100, mid_term_robustness[11]*100, long_term_robustness[11]*100], c= '#43b284') + plt.scatter([10,22,45], [short_term_robustness[5]*100, mid_term_robustness[5]*100,long_term_robustness[5]*100], s=100, c='#B19CD9') + plt.scatter([10, 22, 45], [short_term_robustness[11]*100, mid_term_robustness[11]*100, long_term_robustness[11]*100], s=100, c='#43b284') + plt.xlabel('Time Horizon (yrs)') + plt.ylabel('Robustness (% SOWs)') + plt.legend(['Bedford', 'Greene']) + plt.title('Robustness Over Time') + plt.ylim([0, 107]) + + + + + +.. parsed-literal:: + + (0.0, 107.0) + + + + +.. image:: _static/discovery_4_1.png + + +Exploring performance evolution +------------------------------- + +The figure above reveals that the robustness of both water utilities degrades over time, with Bedford's robustness declining further than Greene. This suggests that the proposed pathway policy is likely insufficient to meet the long-term needs of the two utilities. But how is the current policy insufficient? To answer that question we examine the performance measures that fail to meet performance criteria for each utility across the three planning horizons. + +.. 
code:: ipython3 + + # Plot the type of vulnerability over time + + ### Bedford ### + plot_robustness_1 = np.zeros([3,5]) + # Determine the percentage of failure SOWs that violate each criterion (note some SOWS fail multiple criteria, so this may some to >1) + criteria = ['Reliability', 'Restriction Frequency', 'Peak Financial Cost', 'Worst-case drought\nManagement Cost', 'Stranded Assets'] + plot_robustness_1[0,:] = (1 - short_term_robustness[0:5])/(1-short_term_robustness[5]) + plot_robustness_1[1,:] = (1 - mid_term_robustness[0:5])/(1-mid_term_robustness[5]) + plot_robustness_1[2,:] = (1 - long_term_robustness[0:5])/(1-long_term_robustness[5]) + + # Plot over time + fig, axes = plt.subplots(nrows=1, ncols=3, figsize=(9,4)) + axes[0].bar(np.arange(5), plot_robustness_1[0,:], color='#B19CD9') + axes[0].set_xticks(np.arange(5)) + axes[0].set_xticklabels(criteria, rotation='vertical') + axes[0].set_ylim([0,1]) + axes[0].set_title('10-year Horizon') + axes[0].set_ylabel('Fraction of failure SOWs') + axes[1].bar(np.arange(5), plot_robustness_1[1,:], color='#B19CD9') + axes[1].set_xticks(np.arange(5)) + axes[1].set_xticklabels(criteria, rotation='vertical') + axes[1].set_ylim([0,1]) + axes[1].set_title('22-year Horizon') + axes[2].bar(np.arange(5), plot_robustness_1[2,:], color='#B19CD9') + axes[2].set_xticks(np.arange(5)) + axes[2].set_xticklabels(criteria, rotation='vertical') + axes[2].set_title('45-year Horizon') + axes[2].set_ylim([0,1]) + fig.suptitle('Bedford') + plt.tight_layout() + + ### Greene ### + # Determine the percentage of failure SOWs that violate each criterion (note some SOWS fail multiple criteria, so this may some to >1) + plot_robustness_2 = np.zeros([3, 5]) + plot_robustness_2[0, :] = (1 - short_term_robustness[6:11]) / (1 - short_term_robustness[11]) + plot_robustness_2[1, :] = (1 - mid_term_robustness[6:11]) / (1 - mid_term_robustness[11]) + plot_robustness_2[2, :] = (1 - long_term_robustness[6:11]) / (1 - long_term_robustness[11]) + + fig, axes = plt.subplots(nrows=1, ncols=3, figsize=(9, 4)) + axes[0].bar(np.arange(5), plot_robustness_2[0, :], color='#43b284') + axes[0].set_xticks(np.arange(5)) + axes[0].set_xticklabels(criteria, rotation='vertical') + axes[0].set_title('10-year Horizon') + axes[0].set_ylim([0,1]) + axes[0].set_ylabel('Fraction of failure SOWs') + axes[1].bar(np.arange(5), plot_robustness_2[1, :], color='#43b284') + axes[1].set_xticks(np.arange(5)) + axes[1].set_xticklabels(criteria, rotation='vertical') + axes[1].set_title('22-year Horizon') + axes[1].set_ylim([0,1]) + axes[2].bar(np.arange(5), plot_robustness_2[2, :], color='#43b284') + axes[2].set_xticks(np.arange(5)) + axes[2].set_xticklabels(criteria, rotation='vertical') + axes[2].set_title('45-year Horizon') + axes[2].set_ylim([0,1]) + fig.suptitle('Greene') + plt.tight_layout() + + + + +.. image:: _static/discovery_6_0.png + + + +.. image:: _static/discovery_6_1.png + + +In the figures above, we observe that the vulnerability of both utilities changes in different ways. Early in the simulation period, Bedford is vulnerable to failures in reliability (though the robustness figure created in step B5.2 reveals that these failures are very rare). As the simulation period progresses, Bedford's vulnerability expands to include failures in restriction frequency and worst-case cost. These failures indicate that the utility has an overall inability to manage drought conditions and future conditions progress. + +Greene shows a very different evolution in vulnerability. 
Early in the simulation period, failures manifest in the restriction frequency objective, suggesting that the utility must rely on water use restrictions to maintain supply reliability. As the simulation progresses however, the vulnerability evolves. When evaluated across the 45-year planning horizon, a new failure modes emerges - financial failure manifesting in peak financial cost and stranded assets. This suggests that the proposed pathway policy may be over-investing in new infrastructure, straining the utility's budget with large debt payments that are unnecessary to maintain supply reliability. + +How do deep uncertainties generate vulnerability +------------------------------------------------ + +While the evolution of robustness provides insight into how the system +evolves over time, it does not reveal *why* each utility is vulnerable. +To examine how deep uncertainties generate vulnerability over time for +the two utilities, we perform scenario discovery (factor mapping, +Chapter 4.3). Here we’ll utilize gradient boosted trees to identify +regions of the uncertainty space that cause the utilities to fail to +meet performance criteria. + +.. code:: ipython3 + + # import the performance data across 2000 SOWs for three time periods + short_term_performance = np.loadtxt('data/short_term_performance.csv', delimiter= ',') + mid_term_performance = np.loadtxt('data/mid_term_performance.csv', delimiter = ',') + long_term_performance = np.loadtxt('data/long_term_performance.csv', delimiter = ',') + + satisficing_criteria = [.98, .2, .8, .1, 5] + + # transform into scenario discovery input + short_term_SD_input = create_sd_input(short_term_performance, satisficing_criteria) + mid_term_SD_input = create_sd_input(mid_term_performance, satisficing_criteria) + long_term_SD_input = create_sd_input(long_term_performance, satisficing_criteria) + + # factor mapping Bedford + fig, axes = plt.subplots(nrows=1, ncols=3, figsize=(9,3)) + plot_selected_tree_maps(5, 'short_term', 0, 6, satisficing_criteria, 0, axes[0]) + axes[0].set_title('10-year Horizon') + plot_selected_tree_maps(5, 'mid_term', 0, 6, satisficing_criteria, 0, axes[1]) + axes[1].set_title('22-year Horizon') + plot_selected_tree_maps(5, 'long_term', 0, 1, satisficing_criteria, 0, axes[2]) + axes[2].set_title('45-year Horizon') + fig.suptitle('Bedford Factor Maps') + plt.tight_layout() + + # factor mapping Greene + fig, axes = plt.subplots(nrows=1, ncols=3, figsize=(9,3)) + plot_selected_tree_maps(11, 'short_term', 0, 8, satisficing_criteria, 0, axes[0]) + axes[0].set_title('10-year Horizon') + plot_selected_tree_maps(11, 'mid_term', 0, 6, satisficing_criteria, 0, axes[1]) + axes[1].set_title('22-year Horizon') + plot_selected_tree_maps(11, 'long_term', 0, 1, satisficing_criteria, 0, axes[2]) + axes[2].set_title('45-year Horizon') + fig.suptitle('Greene Factor Maps') + plt.tight_layout() + + + +.. parsed-literal:: + + Factor map for Bedford + Factor map for Bedford + Factor map for Bedford + Factor map for Greene + Factor map for Greene + Factor map for Greene + + + +.. image:: _static/discovery_9_1.png + + + +.. image:: _static/discovery_9_2.png + + +In the figures above, we learn more about how the vulnerability of the two utilities evolves over time. Bedford begins with very few possible failures but appears vulnerable to high demand growth scenarios under future scenarios with high demands. 
When evaluated across a 22-year planning horizon, Bedford is vulnerable when the near-term demand growth is high and water use restrictions are less effective than predicted. Under the full 45-year planning horizon, Bedford is vulnerable to sustained high levels of demand growth, failing if either near-term or mid-term demand growth exceeds expected levels. + +Greene's vulnerability evolves differently. It begins with vulnerability to high demand growth, but as the simulation progresses (and infrastructure is constructed), the utility becomes vulnerable to low-demand growth futures which cause the failures in financial criteria shown in section B.5.3. This indicates that the pathway policy over-builds in many SOWs, and becomes financially unstable if demand does not grow sufficiently to provide revenue to cover debt service payments. + +Which uncertainties have the most influence on time-evolving performance? +------------------------------------------------------------------------- + +The factor maps generated in B.5.4 present the vulnerability generated by the two most important deep uncertainties as determined by Gradient Boosted Trees. Yet the factor prioritization shows that more than two uncertainties are influential to regional performance. Further, we can observe that individual uncertainties have different impacts on each performance obejctive, and these impacts may change over time. In the cells below, explore the impact of deep uncertainty by generating factor maps for different combinations of deep uncertain factors, objectives and time horizons. + +.. code:: ipython3 + + sns.set_style('white') + uncertainties = ['D1', 'D2', 'D3', 'BT', 'BM', 'DR', 'RE', 'EV', 'PM', 'CT', 'IA', 'IF', 'IP'] + uncertainties = ['Near-term demand', 'Mid-term demand', 'Long-term demand', 'Bond Term', 'Bond Rate', 'Discount Rate', 'Restriction Effectiveness', 'Evaporation Rate', 'Permitting time', 'Construction time', 'Inflow Amplitude', 'Inflow Frequency', 'Inflow Period'] + + u1_st_FI = get_factor_importances(short_term_SD_input, rdm_factors, 250, 4, 5) + u1_mt_FI = get_factor_importances(mid_term_SD_input, rdm_factors, 250, 4, 5) + u1_lt_FI = get_factor_importances(long_term_SD_input, rdm_factors, 250, 4, 5) + + u1_all = np.vstack([u1_st_FI,u1_mt_FI, u1_lt_FI]) + u1_all = np.transpose(u1_all) + + # factor ranking -- utility 2 + u2_st_FI = get_factor_importances(short_term_SD_input, rdm_factors, 250, 4, 11) + u2_mt_FI = get_factor_importances(mid_term_SD_input, rdm_factors, 250, 4, 11) + u2_lt_FI = get_factor_importances(long_term_SD_input, rdm_factors, 250, 4, 11) + u2_all = np.vstack([u2_st_FI,u2_mt_FI, u2_lt_FI]) + u2_all = np.transpose(u2_all) + + fig, (ax, ax2, cax) = plt.subplots(ncols=3,figsize=(5,5), + gridspec_kw={"width_ratios":[1,1, 0.1]}) + fig.subplots_adjust(wspace=0.3) + im = ax.imshow(u1_all, cmap='Reds', vmin=0, vmax=.3) + ax.set_yticks(np.arange(13)) + ax.set_yticklabels(uncertainties) + ax.set_xticks(np.arange(3)) + ax.set_xlabel('Time Horizon') + ax.set_title('Bedford') + + im1 = ax2.imshow(u2_all, cmap='Reds', vmin=0, vmax=.3) + ax2.set_yticks(np.arange(13)) + ax2.set_yticklabels([]) + ax2.set_xticks(np.arange(3)) + ax2.set_xlabel('Time Horizon') + ax2.set_title('Greene') + fig.colorbar(im, cax=cax, label='Factor Importance') + plt.tight_layout() + + + + +.. image:: _static/discovery_12_0.png + + + + +The Figure above shows the factor importance as determined by gradient boosted trees for both utilities across the three planning horizons. 
While near-term demand growth is important for both utilities under all three planning horizons, the importance of other factors evolves over time. For example, restriction effectiveness plays an important role for Greene under the 22-year planning horizon but disappears under the 45-year planning horizon. In contrast, the bond interest rate is important for predicting success over the 45-year planning horizon, but does not appear important over the 10- or 22-year planning horizons. These findings highlight how assumptions about the planning period can have a large impact on modeling outcomes. + +Open exploration +---------------- + +In the cell below, use the function to explore how factor maps change +for the two utilities based upon the uncertainties plotted, the +objectives of interest and the time horizon. + +.. code:: ipython3 + + # specify the utility ("Bedford" or "Greene") + utility = "Bedford" + + # specify which performance objectives to investigate (note that not all performance objectives have failures, which may result in a blank factor map) + # set this to one of the following: "Reliability", "Restriction Frequency", "Peak Financial Cost", "Worst Case Cost" or "Unit Cost" + objective = "Reliability" + + # select uncertainties from the following list: 'D1', 'D2', 'D3', 'BT', 'BM', 'DR', 'RE', 'EV', 'PM', 'CT', 'IA', 'IF', 'IP' + uncertainty_1 = 'D1' + uncertainty_2 = 'D2' + + # The code below will plot factor maps over the three planning horizons for the information above + fig, axes = plt.subplots(nrows=1, ncols=3, figsize=(9,3)) + open_exploration(utility, objective, 'short_term', uncertainty_1, uncertainty_2, axes[0]) + open_exploration(utility, objective, 'mid_term', uncertainty_1, uncertainty_2, axes[1]) + open_exploration(utility, objective, 'long_term', uncertainty_1, uncertainty_2, axes[2]) + plt.tight_layout() + + + +.. parsed-literal:: + + Factor map for Bedford, reliability + Factor map for Bedford, reliability + Factor map for Bedford, reliability + + + +.. image:: _static/discovery_16_1.png + + + + +Tips to apply this methodology to your own problem +-------------------------------------------------- + +In this tutorial, we demonstrated time-evolving scenario discovery for a +cooperative water supply system. To apply this workflow to your own +problem: + +1. Choose sampling bounds for your parameters of interest, which will + represent uncertainties that characterize your system. +2. Generate samples for these parameters (this can be done using the + saltelli.sample function as in B.2 or done with another package). +3. Define performance criteria for your problem +4. Evaluate parameter sets through your model, and save performance + measures across multiple time horizons +5. Draw from the supporting code for this tutorial to perform scneario + discovery and visualize results + + +References +-------------------------------------------------- +Trindade, B. C., Reed, P. M., & Characklis, G. W. (2019). Deeply uncertain pathways: Integrated multi-city regional water supply infrastructure investment and portfolio management. Advances in Water Resources, 134, 103442. diff --git a/dev/docs/html/_sources/A2.6_hmm.rst b/dev/docs/html/_sources/A2.6_hmm.rst new file mode 100644 index 0000000..781567d --- /dev/null +++ b/dev/docs/html/_sources/A2.6_hmm.rst @@ -0,0 +1,975 @@ +A Hidden-Markov Modeling Approach to Creating Synthetic Streamflow Scenarios Tutorial +**************************************************************************************************** + +.. 
note:: + + | Run the tutorial interactively: `HMM Notebook `_. + | Please be aware that notebooks can take a couple minutes to launch. + | To run the notebooks yourself, download the files `here `_ and use these `requirements `_. + + +A Hidden-Markov Modeling Approach to Creating Synthetic Streamflow Scenarios +============================================================================ + +In this notebook, we will be covering the basics of fitting a Hidden +Markov Model-based synthetic streamflow generator for a single site in +the Upper Colorado River Basin. First, we will characterize the observed +historical flow in the basin from 1909-2013. Then, we will fit a +synthetic streamflow generator to the observed flows in the basin in +order to create stationary synthetic flows. Finally, we will create a +non-stationary version of the generator to create flows that could be +representative of plausible future climate in the region. We ultimately +show how to place the synthetically generated flows in the context of +physically-informed CMIP5 projections to compare the two methods. + +Background +---------- + +In the Western United States (US), and particularly the Colorado River +Basin, a recent study used tree-ring reconstructions to suggest that the +megadrought that has been occurring in the Southwest over the past 22 +years is the region’s worst drought since about 800 AD (Williams et al., +2022). The study’s lead author, UCLA climatologist Park Williams, +suggested that had the sequence of wet-dry years occurred without +anthropogenic forcing, the 2000s would have likely still been dry, but +not on the same level as the worst of the last millennium’s +megadroughts. + +The recent trend of warming and reduced soil moisture in the Southwest +US is highly challenging from a water systems planning and management +perspective for the Colorado River Basin. Given the wide recognition +that the river is over-allocated, the most recent drought highlights the +difficulty of sustaining the flow requirements as dictated by the +Colorado Compact. Thus, there has been an increasing focus in +exploratory modeling efforts to clarify how vulnerable water systems in +this region are to plausible drought streamflow scenarios for the +future. In this tutorial, we’ll discuss how to create these scenarios +using a Hidden Markov Model (HMM)- based streamflow synthetic generator. +As discussed in `Section +2.1 `__ +and +`4.2 `__ +of the eBook, future climate conditions in the basin represent a deep +uncertainty that can lead to highly consequential water scarcity +outcomes. It is advantageous to create a model such as the HMM-based +generator in order to facilitate the creation of many ensembles of +streamflow that can ultimately be used to force regional water systems +models to understand how variability and drought extremes affect +regional water shortages, operations, and policies. + +.. raw:: html + +
+    <figcaption>Lake Powell shows persistent effects from drought (Source: U.S. Bureau of Reclamation)</figcaption>
+ +Let’s Get Started! +================== + +Observed Record +--------------- + +First, let’s take a look at the observed data from 1909-2013 for a +specific site. In this example, we use the outlet gauge of the Upper +Colorado River (USGS Gauge 09163500 at the Colorado-Utah state line). +Below, we create a plot of the annual streamflow. + +.. code:: ipython3 + + # Import libraries + import matplotlib as mpl + import matplotlib.pyplot as plt + import matplotlib.patches as patches + import numpy as np + import pandas as pd + from random import random + from SALib.sample import latin + from scipy import stats as ss + import statistics + import statsmodels.api as sm + + + # Import helper functions from local package + from functions import fitmodel + from functions import plotstates + from functions import plotdist + + +.. code:: ipython3 + + # Read in annual historical data + AnnualQ = pd.read_csv('data/uc_historical.csv') + AnnualQ['Year'] = list(range(1909, 2014)) + + # Plot a line graph + fig, ax = plt.subplots(figsize=(12, 8)) + ax.plot(AnnualQ.iloc[:, 1], + AnnualQ.iloc[:, 0], + color='#005F73', + label='Annual') + + # Add labels and title + ax.set_title("Upper Colorado Annual Flow") + ax.set_xlabel("Year", fontsize=16) + ax.set_ylabel("Annual Flow (cubic feet per year)", fontsize=16) + plt.xticks(fontsize=12) + plt.yticks(fontsize=12) + mpl.rc('legend', fontsize=16) + legend = plt.legend(loc="upper right") + plt.show() + plt.close() + + + + +.. image:: _static/hmm_9_0.png + + +Let’s calculate an 11-year rolling mean of the same data to get a sense +of long-term trends. + +.. code:: ipython3 + + fig, ax = plt.subplots(figsize=(12, 8)) + + # Plot the original line graph + plt.plot(AnnualQ.iloc[:,1], + AnnualQ.iloc[:,0], + color='#005F73', + label='Annual') + + # Plot an 11-year rolling mean + plt.plot(AnnualQ.iloc[:, 1].rolling(11).mean(), + AnnualQ.iloc[:, 0].rolling(11).mean(), + color='#183A2E', + label='11-Year Rolling Mean') + + # Add labels and title + plt.title("Upper Colorado Annual Flow") + ax.set_xlabel("Year",fontsize=16) + ax.set_ylabel("Annual Flow (cubic feet per year)", fontsize=16) + plt.xticks(fontsize=12) + plt.yticks(fontsize=12) + mpl.rc('legend', fontsize=16) + legend = plt.legend() + plt.show() + plt.close() + + + + +.. image:: _static/hmm_11_0.png + + +The Colorado Compact, which prescribes flows between the Upper and Lower +Colorado Basins, was negotiated using data prior to 1922, a time period +revealed by the above figure to be one of the consistently wetter +periods on record. It’s clear today that since the 1980s, the Southwest +US has been experiencing aridification (Overpeck et al., 2020) and that +this observed record alone isn’t an accurate representation of what +future climate might look like in this region. + +Let’s get a little more specific and formally quantify decadal droughts +that have occurred in the observed period. We use a metric proposed in +Ault et al. (2014). The authors define a decadal drought as when the +11-year rolling mean falls below a threshold that is 1/2 a standard +deviation below the overall mean of the record. We can then highlight +the block of years that fall in a decadal drought using yellow +rectangles below. + +.. code:: ipython3 + + # Define drought threshold + std = statistics.stdev(AnnualQ.iloc[:, 0]) + threshold = np.mean(AnnualQ.iloc[:, 0] - (0.5 * std)) + + # Find where the rolling mean dip below the threshold? 
+ drought_instances = [i for i, v in enumerate(AnnualQ.iloc[:,0].rolling(11).mean()) if v < threshold] + drought_years = AnnualQ.iloc[:, 1].rolling(11).mean()[drought_instances] + + # Add labels and title + fig, ax = plt.subplots(figsize=(12, 8)) + ax.plot(AnnualQ.iloc[:,1], + AnnualQ.iloc[:,0], + color='#005F73', + label='Annual') + + ax.plot(AnnualQ.iloc[:,1].rolling(11,center=True).mean(), + AnnualQ.iloc[:,0].rolling(11,center=True).mean(), + color='#183A2E', + label='11-Year Rolling Mean') + + ax.axhline(y=threshold, + color='black', + linestyle='--', + label='Drought Threshold') + + # Visualize the drought periods as yellow rectangles + for i in drought_years: + + # Plot a box centered around those values and with 5 years on either side. + rect = patches.Rectangle((i-5,0), 11,2e7, linewidth=1, edgecolor='#EFE2BE', facecolor='#EFE2BE') + + # Add the patch to the Axes + ax.add_patch(rect) + + + plt.title("Upper Colorado Annual Flow") + ax.set_xlabel("Year", fontsize=16) + ax.set_ylabel("Annual Flow (cubic feet per year)", fontsize=16) + plt.xticks(fontsize=12) + plt.yticks(fontsize=12) + mpl.rc('legend', fontsize=16) + legend = plt.legend() + plt.show() + plt.close() + + + + +.. image:: _static/hmm_14_0.png + + +By this metric, the Upper Colorado Basin region has experienced two +decadal droughts over the last century. + +Synthetic Stationary Generator to Better Quantify Natural Variability +--------------------------------------------------------------------- + +It is important to remember that the streamflow that we have observed in +the region over the last century is only one instance of the hydrology +that could occur since the atmosphere is an inherently stochastic +system. Thus, we require a tool that will allow us to see multiple +plausible realizations of the streamflow record to understand the +internal variability that characterizes the historical period. One +observed realization of historical streamflow is limited in its ability +to capture rare extremes; plausible (but not observed) alternative +instances of streamflow records can help to fill this gap. The tool that +we use to develop synthetic flows for the region is a Gaussian Hidden +Markov Model (HMM). If a system follows a Markov process, it switches +between a number of “hidden states” dictated by a transition matrix. +Each state has its own Gaussian probability distribution (defined by a +mean and standard deviation) and one can draw from this distribution to +create synthetic flows that fit the properties of the historical +distribution. HMMs are an attractive choice for this region because they +can simulate persistence (i.e., long duration droughts), which is a +characteristic of the region’s hydro-climatology. The figure below shows +an example of a 2-state Gaussian HMM that we will be fitting for this +example. + +.. raw:: html + +
+    <figcaption>Two-state Gaussian HMM with mean and standard deviation parameters</figcaption>
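+
+To build intuition before fitting, the short sketch below simulates a few
+years of log-space flows from a 2-state Gaussian HMM like the one in the
+figure above. The transition matrix ``P_demo`` and the state means and
+standard deviations (``mus_demo``, ``sigmas_demo``) are illustrative
+values only, not the parameters fitted later in this tutorial.
+
+.. code:: ipython3
+
+    import numpy as np
+
+    rng = np.random.default_rng(1)
+
+    # illustrative (not fitted) parameters: state 0 = dry, state 1 = wet
+    P_demo = np.array([[0.7, 0.3],    # P(dry -> dry), P(dry -> wet)
+                       [0.3, 0.7]])   # P(wet -> dry), P(wet -> wet)
+    mus_demo = np.array([14.5, 15.5])        # log-space state means
+    sigmas_demo = np.array([0.30, 0.25])     # log-space state standard deviations
+
+    # simulate a short sequence of hidden states and log-space flows
+    state = 0
+    for year in range(10):
+        state = rng.choice(2, p=P_demo[state])                      # Markov transition
+        log_flow = rng.normal(mus_demo[state], sigmas_demo[state])  # draw from the state's Gaussian
+        print(year, state, round(log_flow, 2))
+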
+ +Below is the code that fits the HMM model to the last 2/3 of the +historical record of log annual flows at the CO-UT stateline gauge and +creates an alternative trace of 105 years. A subset of the dataset is +chosen in order to minimize overfitting and to retain a set of data for +validation of the model. When we fit our model, we utilize the +Baum-Welch algorithm (a special version of the expectation-maximization +algorithm) to find the optimal parameters that maximize the likelihood +of seeing the observed flows. Ultimately, the algorithm will return a +mean and standard deviation associated with each state (mus and sigmas +defined below) and a 2x2 transition probability matrix that captures the +likelihood of transitioning between states (P). We can also retrieve the +annual hidden states across the observed series, also known as the +Viterbi sequence of states, which classifies each year in a “wet” or +“dry” state. + +.. code:: ipython3 + + # Number of years for alternative trace + n_years = 105 + + # Import historical data that it used to fit HMM model + AnnualQ_h = pd.read_csv('data/uc_historical.csv') + + # Fit the model and pull out relevant parameters and samples + logQ = np.log(AnnualQ_h) + hidden_states, mus, sigmas, P, logProb, samples, model = fitmodel.fitHMM(logQ, n_years) + + +We’ve fit our HMM, but what does the model look like? Let’s plot the +annual time series of hidden states, or the Viterbi sequence. In the +code, above, we have defined that the drier state is always represented +by state 0. Thus, we know that hidden_states = 0 corresponds to the dry +state and hidden_states = 1 to the wet state. + +.. code:: ipython3 + + # Plot Vitebi sequence + plotstates.plotTimeSeries(np.log(AnnualQ.iloc[:,0]), hidden_states, 'Annual Flow (cube feet per year)') + + + + +.. image:: _static/hmm_21_0.png + + +In the figure above, we see that the years with the higher log flows +tend to be classified in a “wet” state and the opposite is true of the +“dry” state. We can also print the transition matrix, which shows the +likelihood of transitioning between states. Note that the system has a +high likelihood of persisting in the same state. + +.. code:: ipython3 + + print(model.transmat_) + + +.. parsed-literal:: + + [[0.65095026 0.34904974] + [0.3205531 0.6794469 ]] + + +Let’s also plot the distribution of log annual flows associated with the +wet and dry states. + +.. code:: ipython3 + + # Plot wet and dry state distributions + plotdist.plotDistribution(logQ, mus, sigmas, P) + + + + +.. image:: _static/hmm_25_0.png + + +The wet state distribution is characterized by a greater mean flow, but +note that there is significant overlap in the tails of the distributions +below which demonstrates why years with similiar flows can be classified +in different states. + +Now let’s see what the drought dynamics look like in the synthetic +scenario that we created using the same definition that we had used for +the historical period. + +.. 
code:: ipython3 + + # Retrieve samples and back-transform out of log space + AnnualQ_s = np.exp(samples[0]) + AnnualQ_s = pd.DataFrame(AnnualQ_s) + AnnualQ_s['Year'] = list(range(1909, 2014)) + + # Define drought threshold + std=statistics.stdev(AnnualQ_s.iloc[:, 0]) + threshold=np.mean(AnnualQ_s.iloc[:, 0] - (0.5 * std)) + + # Where does the rolling mean dip below the threshold + drought_instances = [i for i,v in enumerate(AnnualQ_s.iloc[:, 0].rolling(11).mean()) if v < threshold] + drought_years = AnnualQ_s.iloc[:, 1].rolling(11).mean()[drought_instances] + + #Visualize the streamflow scenario + fig, ax = plt.subplots(figsize=(12, 8)) + + #Plot the original line graph + ax.plot(AnnualQ_s.iloc[:,1], + AnnualQ_s.iloc[:,0], + color='#005F73', + label='Annual') + + #Plot a 11-year rolling mean + ax.plot(AnnualQ_s.iloc[:,1], + AnnualQ_s.iloc[:,0].rolling(11, center=True).mean(), + color='#183A2E', + label='11-Year Rolling Mean') + + # Add labels and title + ax.axhline(y=threshold, + color='black', + linestyle='--', + label='Drought Threshold') + + + for i in drought_years: + + #Plot a box centered around those values and with 5 years on either side. + rect = patches.Rectangle((i - 5, + 0), + 11, + 2e7, + linewidth=1, + edgecolor='#EFE2BE', + facecolor='#EFE2BE') + + # Add the patch to the Axes + ax.add_patch(rect) + + + plt.title("Upper Colorado Annual Flow (Synthetic Stationary)",fontsize=16) + plt.xlabel("Year", fontsize=16) + plt.ylabel("Annual Flow (cubic feet per year)", fontsize=16) + mpl.rc('legend', fontsize=16) + plt.legend() + plt.xticks(fontsize=12) + plt.yticks(fontsize=12) + plt.show() + plt.close() + + + + +.. image:: _static/hmm_28_0.png + + +You can sample from the model and create more 105-year traces and note +how the location and number of decadal droughts changes. This +demonstrates how different the historical record can look just within +the range of natural variability. It’s also important to remember that +when droughts occur can also define the ultimate effect of the drought +(i.e. is it a time when there is a large population growth or a time +when humans can adapt by conserving or building more infrastructure?). A +hydrologic drought need not manifest into an agricultural or operational +drought of the same magnitude if stored surface water is available. + +We externally run the HMM many times to create a dataset of 100 +instances of the 105-year traces and 1000 instances of the 105-year +traces that are available in the package +(“synthetic_stationary_small_sample_100.csv”,“synthetic_stationary_large_sample_1000”). +The shaded green lines correspond to the flow duration curves (FDCs) for +the generated streamflow traces in comparison with the FDC of the +historical record in beige. + +.. raw:: html + +
+    <figcaption>Generated streamflow traces in comparison with the FDC of the historical record.</figcaption>
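+
+The flow duration curves above can be reproduced from any set of traces by
+ranking flows and plotting them against their exceedance probabilities. The
+sketch below shows one way to do this; the file path and layout (one
+105-year trace per column of ``synthetic_stationary_small_sample_100.csv``)
+are assumptions and may differ from the packaged data.
+
+.. code:: ipython3
+
+    import numpy as np
+    import pandas as pd
+    import matplotlib.pyplot as plt
+
+    def fdc(flows):
+        # sort flows from largest to smallest and compute exceedance probabilities
+        sorted_flows = np.sort(np.asarray(flows))[::-1]
+        exceedance = np.arange(1, len(sorted_flows) + 1) / (len(sorted_flows) + 1)
+        return exceedance, sorted_flows
+
+    # assumed layout: one synthetic 105-year trace per column
+    synthetic = pd.read_csv('data/synthetic_stationary_small_sample_100.csv', header=None)
+
+    fig, ax = plt.subplots(figsize=(10, 6))
+    for col in synthetic.columns:
+        ax.plot(*fdc(synthetic[col].values), color='#C6E2C6', alpha=0.5)
+
+    # overlay the historical FDC for comparison
+    ax.plot(*fdc(AnnualQ_h.iloc[:, 0].values), color='#EFE2BE', lw=3, label='Historical')
+    ax.set_xlabel('Exceedance Probability', fontsize=16)
+    ax.set_ylabel('Annual Flow (cubic feet per year)', fontsize=16)
+    ax.legend()
+    plt.show()
+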
+ +As expected, the stationary synthetic FDCs envelope the historical FDC +and particularly, the synthetic traces offer many more instances of low +flow conditions that could lead to more extreme drought conditions than +what has been observed historically. It is also useful to check for +convergence of samples and to determine how many samples are needed to +fully represent internal variability. Above we see that the extension to +1000 instances of 105-year traces fills out regions of the FDC, +including creating some more extreme drought conditions, but that +additional samples will likely not fill out the FDC substantially more. + +Non-Stationary Synthetic Generator to Impose Climate Changes +------------------------------------------------------------ + +Now, we create flows under non-stationary conditions to get a better +understanding of what flows can look like under climate changes. In +order to create flows under non-stationary conditions, we can toggle the +parameters of the HMM model in order to create systematic changes to the +model that can represent a changing climate. The HMM has 6 parameters +that define it. When we fit the historical model, the parameters that +are fit represent a baseline parameter value. In this non-stationary +generator, we define a range to sample these parameters from. + ++-----------------------+---------------+-------------+-------------+ +| Parameter | Current Value | Lower Bound | Upper Bound | ++=======================+===============+=============+=============+ +| Log-Space Wet State | 1.00 | 0.98 | 1.02 | +| Mean Multiplier | | | | ++-----------------------+---------------+-------------+-------------+ +| Log-Space Dry State | 1.00 | 0.98 | 1.02 | +| Mean Multiplier | | | | ++-----------------------+---------------+-------------+-------------+ +| Log-Space Wet State | 1.00 | 0.75 | 1.25 | +| Standard Deviation | | | | +| Multiplier | | | | ++-----------------------+---------------+-------------+-------------+ +| Log-Space Dry State | 1.00 | 0.75 | 1.25 | +| Standard Deviation | | | | +| Multiplier | | | | ++-----------------------+---------------+-------------+-------------+ +| Change in Dry-Dry | 0.00 | -0.30 | +0.30 | +| Transition | | | | +| Probability | | | | ++-----------------------+---------------+-------------+-------------+ +| Change in Wet-Wet | 0.00 | -0.30 | +0.30 | +| Transition | | | | +| Probability | | | | ++-----------------------+---------------+-------------+-------------+ + +Now let’s sample 1000 times from these bounds to create 1000 new +parameterizations of the model. Here we use SALib and the Latin +Hypercube sample function. + +.. code:: ipython3 + + # Create problem structure with parameters that we want to sample + problem = { + 'num_vars': 6, + 'names': ['wet_mu', 'dry_mu', 'wet_std','dry_std','dry_tp',"wet_tp"], + 'bounds': [[0.98, 1.02], + [0.98, 1.02], + [0.75,1.25], + [0.75,1.25], + [-0.3,0.3], + [-0.3,0.3]] + } + + # generate 1000 parameterizations + n_samples = 1000 + + # set random seed for reproducibility + seed_value = 123 + + # Generate our samples + LHsamples = latin.sample(problem, n_samples, seed_value) + + +Now let’s look at what some of the traces look like in our +non-stationary generator. Let’s choose a random instance from the +1000-member space and adjust the parameters accordingly. + +.. 
code:: ipython3 + + # Define static parameters + n_years = 105 + + # Sample parameter; Adjust to any sample number from 0-999 + sample = 215 + + # Create empty arrays to store the new Gaussian HMM parameters for each SOW + Pnew = np.empty([2,2]) + piNew = np.empty([2]) + musNew_HMM = np.empty([2]) + sigmasNew_HMM = np.empty([2]) + logAnnualQ_s = np.empty([n_years]) + + # Calculate new transition matrix and stationary distribution of SOW at last node as well as new means and standard deviations + Pnew[0, 0] = max(0.0, min(1.0, P[0, 0] + LHsamples[sample][4])) + Pnew[1, 1] = max(0.0, min(1.0, P[1, 1] + LHsamples[sample][5])) + Pnew[0, 1] = 1 - Pnew[0, 0] + Pnew[1, 0] = 1 - Pnew[1, 1] + eigenvals, eigenvecs = np.linalg.eig(np.transpose(Pnew)) + one_eigval = np.argmin(np.abs(eigenvals - 1)) + piNew = np.divide(np.dot(np.transpose(Pnew), eigenvecs[:, one_eigval]), + np.sum(np.dot(np.transpose(Pnew), eigenvecs[:,one_eigval]))) + + musNew_HMM[0] = mus[0] * LHsamples[sample][1] + musNew_HMM[1] = mus[1] * LHsamples[sample][0] + sigmasNew_HMM[0] = sigmas[0] * LHsamples[sample][3] + sigmasNew_HMM[1] = sigmas[1] * LHsamples[sample][2] + + # Generate first state and log-space annual flow at last node + states = np.empty([n_years]) + if random() <= piNew[0]: + states[0] = 0 + logAnnualQ_s[0] = ss.norm.rvs(musNew_HMM[0], sigmasNew_HMM[0]) + else: + states[0] = 1 + logAnnualQ_s[0] = ss.norm.rvs(musNew_HMM[1], sigmasNew_HMM[1]) + + # Generate remaining state trajectory and log space flows at last node + for j in range(1, n_years): + if random() <= Pnew[int(states[j-1]), int(states[j-1])]: + states[j] = states[j-1] + else: + states[j] = 1 - states[j-1] + + if states[j] == 0: + logAnnualQ_s[j] = ss.norm.rvs(musNew_HMM[0], sigmasNew_HMM[0]) + else: + logAnnualQ_s[j] = ss.norm.rvs(musNew_HMM[1], sigmasNew_HMM[1]) + + # Convert log-space flows to real-space flows + AnnualQ_s = np.exp(logAnnualQ_s)-1 + + +Now let’s see what this synthetic trace looks like. + +.. code:: ipython3 + + # Retrieve samples and back-transform out of log space + AnnualQ_s = pd.DataFrame(AnnualQ_s) + AnnualQ_s['Year'] = list(range(1909, 2014)) + + # Define drought threshold + std = statistics.stdev(AnnualQ_s.iloc[:, 0]) + threshold = np.mean(AnnualQ_s.iloc[:, 0] - (0.5 * std)) + + # Where does the rolling mean dip below the threshold + drought_instances = [i for i, v in enumerate(AnnualQ_s.iloc[:, 0].rolling(11).mean()) if v < threshold] + drought_years = AnnualQ_s.iloc[:, 1].rolling(11).mean()[drought_instances] + + # Visualize the streamflow scenario + fig, ax = plt.subplots(figsize=(12, 8)) + + # Plot the original line graph + ax.plot(AnnualQ_s.iloc[:,1], + AnnualQ_s.iloc[:,0], + color='#005F73', + label='Annual') + + # Plot a 11-year rolling mean + ax.plot(AnnualQ_s.iloc[:, 1], + AnnualQ_s.iloc[:, 0].rolling(11, center=True).mean(), + color='#183A2E', + label='11-Year Rolling Mean') + + # Add labels and title + ax.axhline(y=threshold, + color='black', + linestyle='--', + label='Drought Threshold') + + + for i in drought_years: + + # Plot a box centered around those values and with 5 years on either side. 
+ rect = patches.Rectangle((i - 5,0), + 11, + 2e7, + linewidth=1, + edgecolor='#EFE2BE', + facecolor='#EFE2BE') + + # Add the patch to the Axes + ax.add_patch(rect) + + + plt.title("Annual Flow (Synthetic Non-Stationary)", fontsize=16) + plt.xlabel("Year", fontsize=16) + plt.ylabel("Annual Flow (cubic feet per year)", fontsize=16) + plt.xticks(fontsize=12) + plt.yticks(fontsize=12) + mpl.rc('legend', fontsize=16) + legend = plt.legend(loc="upper right") + plt.show() + plt.close() + + + + + +.. image:: _static/hmm_40_0.png + + +Above is the example trace from the new non-stationary model. You may +see fewer or more decadal drought instances. We can further summarize +overall decadal drought characteristics across the samples. Let’s plot a +histogram of the total number of times we go below the drought threshold +across these realizations. + +.. code:: ipython3 + + decadal_drought_occurence=np.empty([1000]) + + for y in range(1000): + + # Create empty arrays to store the new Gaussian HMM parameters for each SOW + Pnew = np.empty([2, 2]) + piNew = np.empty([2]) + musNew_HMM = np.empty([2]) + sigmasNew_HMM = np.empty([2]) + logAnnualQ_s = np.empty([n_years]) + + # Calculate new transition matrix and stationary distribution of SOW at last node + # as well as new means and standard deviations + + Pnew[0, 0] = max(0.0,min(1.0, P[0, 0] + LHsamples[y][4])) + Pnew[1, 1] = max(0.0,min(1.0, P[1, 1] + LHsamples[y][5])) + Pnew[0, 1] = 1 - Pnew[0, 0] + Pnew[1, 0] = 1 - Pnew[1, 1] + eigenvals, eigenvecs = np.linalg.eig(np.transpose(Pnew)) + one_eigval = np.argmin(np.abs(eigenvals - 1)) + piNew = np.divide(np.dot(np.transpose(Pnew), eigenvecs[:, one_eigval]), + np.sum(np.dot(np.transpose(Pnew), eigenvecs[:, one_eigval]))) + + musNew_HMM[0] = mus[0] * LHsamples[y][1] + musNew_HMM[1] = mus[1] * LHsamples[y][0] + sigmasNew_HMM[0] = sigmas[0] * LHsamples[y][3] + sigmasNew_HMM[1] = sigmas[1] * LHsamples[y][2] + + # Generate first state and log-space annual flow at last node + states = np.empty([n_years]) + if random() <= piNew[0]: + states[0] = 0 + logAnnualQ_s[0] = ss.norm.rvs(musNew_HMM[0], sigmasNew_HMM[0]) + else: + states[0] = 1 + logAnnualQ_s[0] = ss.norm.rvs(musNew_HMM[1], sigmasNew_HMM[1]) + + # generate remaining state trajectory and log space flows at last node + for j in range(1, n_years): + if random() <= Pnew[int(states[j-1]), int(states[j-1])]: + states[j] = states[j-1] + else: + states[j] = 1 - states[j-1] + + if states[j] == 0: + logAnnualQ_s[j] = ss.norm.rvs(musNew_HMM[0], sigmasNew_HMM[0]) + else: + logAnnualQ_s[j] = ss.norm.rvs(musNew_HMM[1], sigmasNew_HMM[1]) + + # Convert log-space flows to real-space flows + AnnualQ_s = np.exp(logAnnualQ_s) - 1 + AnnualQ_s = pd.DataFrame(AnnualQ_s) + AnnualQ_s['Year'] = list(range(1909, 2014)) + + # Define drought threshold + std = statistics.stdev(AnnualQ_s.iloc[:, 0]) + threshold = np.mean(AnnualQ_s.iloc[:, 0] - (0.5 * std)) + + # Where does the rolling mean dip below the threshold + drought_instances = [i for i, v in enumerate(AnnualQ_s.iloc[:, 0].rolling(11).mean()) if v < threshold] + decadal_drought_occurence[y] = len(drought_instances) + + +.. 
code:: ipython3
+
+    fig, ax = plt.subplots(figsize=(12, 8))
+    ax.hist(decadal_drought_occurence, label='Non-Stationary generator', color="#005F73")
+    ax.set_xlabel('Number of Instances of Decadal Drought', fontsize=16)
+    ax.set_ylabel('Frequency', fontsize=16)
+    ax.axvline(x=2, color='r', linestyle='-', label='Observed')
+    mpl.rc('legend', fontsize=16)
+    plt.xticks(fontsize=12)
+    plt.yticks(fontsize=12)
+    plt.show()
+    plt.close()
+
+
+
+.. image:: _static/hmm_43_0.png
+
+
+Note how many more instances of decadal drought we create with the
+non-stationary generator than appear in our observed 105-year trace;
+this provides a rich space in which to test our models. Just as we did
+with the stationary generator, we can externally run the non-stationary
+generator to create 10,000 instances of the 105-year traces, which are
+available in the package
+(“synthetic_nonstationary_large_sample_10000.csv”). The shaded green and
+blue lines correspond to the FDCs for the stationary and non-stationary
+generated streamflow traces in comparison with the FDC of the historical
+record in beige. Note how the non-stationary generator produces even
+more drought extremes than the stationary synthetic traces.
+
+*Generated streamflow traces in comparison with the FDC of the historical record.*
+
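+If you would like to reproduce a figure like this yourself, a flow
+duration curve is simple to construct: sort each trace’s annual flows in
+descending order and plot them against their exceedance probabilities.
+The short sketch below illustrates the idea for the externally generated
+large sample; the file path and layout (one 105-year trace per row) are
+assumptions, so adjust them to match how the data are stored on your
+machine.
+
+.. code:: ipython3
+
+    # Illustrative sketch: build FDCs for the externally generated traces.
+    # The file path and layout (one 105-year trace per row) are assumptions.
+    synthetic = pd.read_csv('data/synthetic_nonstationary_large_sample_10000.csv',
+                            header=None).values
+
+    n_years_fdc = synthetic.shape[1]
+
+    # Weibull plotting positions give the exceedance probability of each ranked flow
+    exceedance = np.arange(1, n_years_fdc + 1) / (n_years_fdc + 1)
+
+    fig, ax = plt.subplots(figsize=(12, 8))
+    for trace in synthetic:
+        # Sort each trace from largest to smallest flow before plotting
+        ax.plot(exceedance, np.sort(trace)[::-1], color='#005F73', alpha=0.05)
+
+    ax.set_xlabel('Exceedance Probability', fontsize=16)
+    ax.set_ylabel('Annual Flow (cubic feet per year)', fontsize=16)
+    plt.title('FDCs of Non-Stationary Synthetic Traces', fontsize=16)
+    plt.show()
+    plt.close()
+
+Applying the same sorting-and-plotting step to the historical record and
+to the stationary sample produces the remaining curves in the comparison
+described above.
+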
+
+
+Placing CMIP5 Projections in the Context of Non-Stationary Flows
+----------------------------------------------------------------
+
+We have broadened the drought conditions that we are creating, which can
+be very useful for understanding how our water systems model performs
+under potentially extreme scenarios. However, it’s also useful to place
+our bottom-up, synthetically generated flows in the context of global,
+physically-driven CMIP5 projections to better understand how the two
+approaches compare. We first acquire 97 CMIP5 projections from the
+Colorado River Water Availability Study (CWCB, 2012). In each of
+these projections, monthly precipitation factor changes and temperature
+delta changes were computed between mean projected 2035–2065 climate
+statistics and mean historical climate statistics from 1950–2013. These
+97 different combinations of 12 monthly precipitation multipliers and 12
+monthly temperature delta shifts were applied to historical
+precipitation and temperature time series from 1950–2013. The resulting
+climate time series were run through a Variable Infiltration Capacity
+(VIC) model of the UCRB, resulting in 97 time series of projected future
+streamflows at the Colorado-Utah state line.
+
+We fit an HMM to each trace of projected streamflow and get a set of
+corresponding HMM parameters. Then we take the ratio between these
+parameters and the baseline HMM parameters that we calculated earlier in
+the notebook in order to calculate the multipliers associated with each
+CMIP5 projection. This is all done externally, so we import the
+resulting multipliers in the next line.
+
+.. code:: ipython3
+
+    # Read in CMIP5 and paleo multipliers
+    CMIP5_multipliers = pd.read_csv('data/CMIP5_SOWs.txt', header=None, sep=" ")
+
+
+Let’s plot a response surface that will allow us to see how combinations
+of HMM parameters tend to influence decadal drought. In order to get a
+continuous surface, we’ll fit a non-linear regression to the parameter
+values and then predict the decadal drought over a set of grid points.
+We fit the response surface for two parameters that should have an
+effect on decadal drought: the dry distribution mean and the dry-dry
+transition probabilities.
+
+.. code:: ipython3
+
+    # Choose two parameters to fit the response surface for
+    mu_dry = [i[1] for i in LHsamples]
+    tp_dry = [i[4] for i in LHsamples]
+
+    # Create an interpolation grid
+    xgrid = np.arange(np.min(mu_dry),
+                      np.max(mu_dry),
+                      (np.max(mu_dry) - np.min(mu_dry)) / 100)
+
+    ygrid = np.arange(np.min(tp_dry),
+                      np.max(tp_dry),
+                      (np.max(tp_dry) - np.min(tp_dry)) / 100)
+
+    # Fit regression
+    d = {'Dry_Tp': tp_dry,
+         'Dry_Mu': mu_dry,
+         'Drought_Occurrence': decadal_drought_occurence}
+
+    df = pd.DataFrame(d)
+    df['Intercept'] = np.ones(np.shape(df)[0])
+    df['Interaction'] = df['Dry_Tp'] * df['Dry_Mu']
+    cols = ['Intercept'] + ['Dry_Mu'] + ['Dry_Tp'] + ['Interaction']
+    ols = sm.OLS(df['Drought_Occurrence'], df[cols])
+    result = ols.fit()
+
+    # Calculate drought occurrence for each grid point
+    X, Y = np.meshgrid(xgrid, ygrid)
+    x = X.flatten()
+    y = Y.flatten()
+    grid = np.column_stack([np.ones(len(x)), x, y, x * y])
+    z = result.predict(grid)
+    z[z < 0.0] = 0.0  # replace negative drought occurrence predictions with 0
+
+
+Let’s plot our results:
+
+.. 
code:: ipython3 + + # Set color gradient for response surface + drought_map = mpl.cm.get_cmap('RdBu_r') + + # Reshape our predicted drought occurrence and define bounds of colors + Z = np.reshape(z, np.shape(X)) + vmin = np.min([np.min(z), np.min(df['Drought_Occurrence'].values)]) + vmax = 15 + norm = mpl.colors.Normalize(vmin, vmax) + + # Plot response surface and CMIP5 projections + fig, ax = plt.subplots(figsize=(12, 8)) + ax.contourf(X, Y, Z, cmap=drought_map, norm=norm) + ax.scatter(CMIP5_multipliers.iloc[:,7], + CMIP5_multipliers.iloc[:,12], + c='#ffffb3', + edgecolor='none', + s=30) + cbar = ax.figure.colorbar(mpl.cm.ScalarMappable(norm=norm, cmap=drought_map), ax=ax) + ax.set_xlim(np.nanmin(X), np.nanmax(X)) + ax.set_ylim(np.nanmin(Y), np.nanmax(Y)) + ax.set_xlabel('Dry State Mu', fontsize=14) + ax.set_ylabel('Dry-Dry Transition Probability', fontsize=14) + ax.tick_params(axis='both', labelsize=14) + cbar.ax.set_ylabel('Decadal Drought Occurrence', rotation=-90, fontsize=14, labelpad=15) + cbar.ax.tick_params(axis='y',labelsize=14) + plt.show() + plt.close() + + + + + +.. image:: _static/hmm_53_0.png + + +We see the influence of the dry state mean and dry-dry transition +parameters. We’re likely to see more decadal droughts when we (1) +increase the dry-dry transition probability, which inherently will +increase persistence of the dry state, and (2) when we make the dry +state log mean drier. Note that the CMIP5 scenarios tend to span the +extent of the dry mean sample space, but are less representative of the +dry transition probability sample space, which suggests that the types +of hydrological droughts represented in the projections tend to only be +wetter to slightly drier than our baseline. Both methods of producing +these scenarios are valid, though studies have suggested that +globally-resolved GCMs may be inappropriate to represent regional +extremes. Ultimately, if your goal is to produce a variety of ensembles +that are characterized by many different drought characteristics, you +will likely find that a generator approach will serve this purpose +better. + +Tips to Create an HMM-Based Generator for your System +----------------------------------------------------- + +In this tutorial, we demonstrated how to fit an HMM-based generator for +a single gauge located in the Upper Colorado River Basin. In order to +apply this methodology to your problem, you will need to first ask: + +(1) Is this model appropriate for my location of interest? We have + applied this style of generator to locations where persistent wet + and dry states are characteristic, which tends to be in the Western + US. Ultimately the best way to judge if an HMM is useful for your + application is to fit the model and explore the resulting + distributions. Are there two (or more) distinct states that emerge? + If not, then your location may not exhibit the type of persistence + that an HMM-based generator is useful for. You can consider + exploring other styles of generators such as the Kirsch-Nowak + generator (Kirsch et al., 2013). + +(2) Do I have the right datasets? We use annual data for our location of + interest. In this notebook, the HMM is fit to log annual flows. + Ultimately, it can be disaggregated to daily flows (using a + reference historical daily dataset) to be useful in water resources + operational applications. You could also disaggregate to a finer + resolution than daily if the historical dataset exists. 
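+
+As a quick way to explore question (1) above for your own gauge, the
+short sketch below fits a two-state Gaussian HMM to a generic annual
+flow series and prints the fitted state statistics. The ``hmmlearn``
+package and the input file name are assumptions here, so swap in your
+own data and preferred fitting tool.
+
+.. code:: ipython3
+
+    # Illustrative sketch: check whether two distinct, persistent states
+    # emerge for your own record. The file name and the use of hmmlearn
+    # are assumptions, not requirements.
+    import numpy as np
+    from hmmlearn import hmm
+
+    # One annual flow value per line (placeholder file name)
+    annual_Q = np.loadtxt('my_annual_flows.txt')
+    log_Q = np.log(annual_Q).reshape(-1, 1)
+
+    # Fit a two-state Gaussian HMM to the log-space annual flows
+    model = hmm.GaussianHMM(n_components=2, n_iter=1000).fit(log_Q)
+
+    # Well-separated state means and high self-transition probabilities
+    # indicate the kind of wet/dry persistence this generator relies on.
+    print("State means:", model.means_.flatten())
+    print("State standard deviations:", np.sqrt(model.covars_).flatten())
+    print("Self-transition probabilities:", np.diag(model.transmat_))
+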
+
+If you meet these requirements, feel free to proceed through fitting the
+model using the code available in the notebook. Be sure to consider the
+appropriate number of samples to generate (in both the stationary and
+non-stationary cases). Make sure that you test multiple sample sizes and
+continue to increase your sample size until you converge to a consistent
+representation of extremes. What is the appropriate number of LHS
+samples of the parameters to use? In this experiment we used 1,000
+samples of parameters due to extensive stability tests described in
+Quinn et al. (2020).
+
+Finally, to learn more about this test case refer to Hadjimichael et
+al. (2020a) and Hadjimichael et al. (2020b). For another study on
+synthetic drought generation to support vulnerability assessments in the
+Research Triangle region of North Carolina, please refer to Herman et
+al. (2016).
+
+References
+----------
+
+Ault, T. R., Cole, J. E., Overpeck, J. T., Pederson, G. T., & Meko, D.
+M. (2014). Assessing the risk of persistent drought using climate model
+simulations and paleoclimate data. Journal of Climate, 27(20),
+7529-7549.
+
+CWCB (2012). Colorado River Water Availability Study Phase I Report.
+Colorado Water Conservation Board.
+
+Hadjimichael, A., Quinn, J., Wilson, E., Reed, P., Basdekas, L., Yates,
+D., & Garrison, M. (2020a). Defining robustness, vulnerabilities, and
+consequential scenarios for diverse stakeholder interests in
+institutionally complex river basins. Earth’s Future, 8(7),
+e2020EF001503.
+
+Hadjimichael, A., Quinn, J., & Reed, P. (2020b). Advancing diagnostic
+model evaluation to better understand water shortage mechanisms in
+institutionally complex river basins. Water Resources Research, 56(10),
+e2020WR028079.
+
+Herman, J. D., Zeff, H. B., Lamontagne, J. R., Reed, P. M., &
+Characklis, G. W. (2016). Synthetic drought scenario generation to
+support bottom-up water supply vulnerability assessments. Journal of
+Water Resources Planning and Management, 142(11), 04016050.
+
+Kirsch, B. R., Characklis, G. W., & Zeff, H. B. (2013). Evaluating the
+impact of alternative hydro-climate scenarios on transfer agreements:
+Practical improvement for generating synthetic streamflows. Journal of
+Water Resources Planning and Management, 139(4), 396-406.
+
+Overpeck, J. T., & Udall, B. (2020). Climate change and the aridification
+of North America. Proceedings of the National Academy of Sciences,
+117(22), 11856-11858.
+
+Quinn, J. D., Hadjimichael, A., Reed, P. M., & Steinschneider, S. (2020).
+Can exploratory modeling of water scarcity vulnerabilities and robustness
+be scenario neutral? Earth’s Future, 8, e2020EF001650.
+https://doi.org/10.1029/2020EF001650
+
+Williams, A. P., Cook, B. I., & Smerdon, J. E. (2022). Rapid
+intensification of the emerging southwestern North American megadrought
+in 2020–2021. Nature Climate Change, 12(3), 232-234.
diff --git a/dev/docs/html/_sources/A2_Jupyter_Notebooks.rst b/dev/docs/html/_sources/A2_Jupyter_Notebooks.rst
new file mode 100644
index 0000000..cfa77c0
--- /dev/null
+++ b/dev/docs/html/_sources/A2_Jupyter_Notebooks.rst
@@ -0,0 +1,12 @@
+.. _A2_Jupyter_Notebooks:
+
+**************************
+Jupyter Notebook Tutorials
+**************************
+
+.. include:: A2.1_fishgame.rst
+.. include:: A2.2_saltelli.rst
+.. include:: A2.3_logistic.rst
+.. include:: A2.4_hymod.rst
+.. include:: A2.5_discovery.rst
+.. 
include:: A2.6_hmm.rst diff --git a/dev/docs/html/_sources/A3_plotting_code.rst b/dev/docs/html/_sources/A3_plotting_code.rst new file mode 100644 index 0000000..f3b3499 --- /dev/null +++ b/dev/docs/html/_sources/A3_plotting_code.rst @@ -0,0 +1,549 @@ +Plotting Code Samples +********************* + +hymod.ipynb +--------------------------------------- + +The following are the plotting functions as described in the ``hymod.ipynb`` Jupyter notebook tutorial. + +The following are the necessary package imports to run these functions: + +.. code-block:: python + + import numpy as np + import seaborn as sns + import matplotlib.pyplot as plt + + from matplotlib.lines import Line2D + + +**plot_observed_vs_simulated_streamflow()** +___________________________________________ + +.. code-block:: python + + def plot_observed_vs_simulated_streamflow(df, hymod_dict, figsize=[12, 6]): + """Plot observed versus simulated streamflow. + + :param df: Dataframe of hymod input data including columns for precip, potential evapotranspiration, + and streamflow + + :param hymod_dict: A dictionary of hymod outputs + :type hymod_dict: dict + + :param figsize: Matplotlib figure size + :type figsize: list + + """ + + # set plot style + plt.style.use('seaborn-white') + + # set up figure + fig, ax = plt.subplots(figsize=figsize) + + # plot observed streamflow + ax.plot(range(0, len(df['Strmflw'])), df['Strmflw'], color='pink') + + # plot simulated streamflow + ax.plot(range(0, len(df['Strmflw'])), hymod_dict['Q'], color='black') + + # set axis labels + ax.set_ylabel('Streamflow($m^3/s$)') + ax.set_xlabel('Days') + + # set plot title + plt.title('Observed vs. Simulated Streamflow') + + return ax + + +**plot_observed_vs_sensitivity_streamflow()** +_____________________________________________ + +.. code-block:: python + + def plot_observed_vs_sensitivity_streamflow(df_obs, df_sim, figsize=[10, 4]): + """Plot observed streamflow versus simulations generated from sensitivity analysis. + + :param df_obs: Dataframe of mean monthly hymod input data including columns for precip, + potential evapotranspiration, and streamflow + + :param df_sim: Dataframe of mean monthly simulation data from sensitivity analysis + + :param figsize: Matplotlib figure size + :type figsize: list + + """ + + month_list = range(len(df_sim)) + + # set up figure + fig, ax = plt.subplots(figsize=figsize) + + # set labels + ax.set_xlabel('Days') + ax.set_ylabel('Flow Discharge (m^3/s)') + + # plots all simulated streamflow cases under different sample sets + for i in df_sim.columns: + plt.plot(month_list, df_sim[i], color="pink", alpha=0.2) + + # plot observed streamflow + plt.plot(month_list, df_obs['Strmflw'], color="black") + + plt.title('Observed vs. Sensitivity Analysis Outputs') + + return ax + + +**plot_monthly_heatmap()** +__________________________ + +.. code-block:: python + + def plot_monthly_heatmap(arr_sim, df_obs, title='', figsize=[14, 6]): + """Plot a sensitivity metric overlain by observed flow. 
+ + :param arr_sim: Numpy array of simulated metrics + + :param df_obs: Dataframe of mean monthly observed data from sensitivity analysis + + :param title: Title of plot + :type title: str + + :param figsize: Matplotlib figure size + :type figsize: list + + """ + + # set up figure + fig, ax = plt.subplots(figsize=figsize) + + # plot heatmap + sns.heatmap(arr_sim, + ax=ax, + yticklabels=['Kq', 'Ks', 'Alp', 'Huz', 'B'], + cmap=sns.color_palette("ch:s=-.2,r=.6")) + + # setup overlay axis + ax2 = ax.twinx() + + # plot line + ax2.plot(np.arange(0.5, 12.5), df_obs['Strmflw'], color='slateblue') + + # plot points on line + ax2.plot(np.arange(0.5, 12.5), df_obs['Strmflw'], color='slateblue', marker='o') + + # set axis limits and labels + ax.set_ylim(0, 5) + ax.set_xlim(0, 12) + ax.set_xticklabels(['jan', 'feb', 'mar', 'apr', 'may', 'jun', 'jul', 'aug', 'sep', 'oct', 'nov', 'dec']) + ax2.set_ylabel('Flow Discharge($m^3/s$)') + + plt.title(title) + + plt.show() + + return ax, ax2 + + +**plot_annual_heatmap()** +__________________________ + +.. code-block:: python + + def plot_annual_heatmap(arr_sim, df_obs, title='', figsize=[14,5]): + """Plot a sensitivity metric overlain by observed flow.. + + :param arr_sim: Numpy array of simulated metrics + + :param df_obs: Dataframe of mean monthly observed data from sensitivity analysis + + :param title: Title of plot + :type title: str + + :param figsize: Matplotlib figure size + :type figsize: list + + """ + + # set up figure + fig, ax = plt.subplots(figsize=figsize) + + # plot heatmap + sns.heatmap(arr_sim, ax=ax, cmap=sns.color_palette("YlOrBr")) + + # setup overlay axis + ax2 = ax.twinx() + + # plot line + ax2.plot(np.arange(0.5, 10.5), df_obs['Strmflw'], color='slateblue') + + # plot points on line + ax2.plot(np.arange(0.5, 10.5), df_obs['Strmflw'], color='slateblue', marker='o') + + # set up axis lables and limits + ax.set_ylim(0, 5) + ax.set_xlim(0, 10) + ax.set_yticklabels(['Kq', 'Ks', 'Alp', 'Huz', 'B']) + ax.set_xticklabels(range(2000, 2010)) + ax2.set_ylabel('Flow Discharge($m^3/s$)') + + plt.title(title) + + return ax, ax2 + + +**plot_varying_heatmap()** +___________________________ + +.. code-block:: python + + def plot_varying_heatmap(arr_sim, df_obs, title='', figsize=[14,5]): + """Plot a sensitivity metric overlain by observed flow.. + + :param arr_sim: Numpy array of simulated metrics + + :param df_obs: Dataframe of mean monthly observed data from sensitivity analysis + + :param title: Title of plot + :type title: str + + :param figsize: Matplotlib figure size + :type figsize: list + + """ + + # set up figure + fig, ax = plt.subplots(figsize=figsize) + + # plot heatmap + sns.heatmap(arr_sim, + ax=ax, + yticklabels=['Kq', 'Ks', 'Alp', 'Huz', 'B'], + cmap=sns.light_palette("seagreen", as_cmap=True)) + + n_years = df_obs.shape[0] + + # setup overlay axis + ax2 = ax.twinx() + + # plot line + ax2.plot(range(0, n_years), df_obs['Strmflw'], color='slateblue') + + # plot points on line + ax2.plot(range(0, n_years), df_obs['Strmflw'], color='slateblue', marker='o') + + # set up axis lables and limits + ax.set_ylim(0, 5) + ax.set_xlim(-0.5, 119.5) + ax2.set_ylabel('Flow Discharge') + ax.set_xlabel('Number of Months') + + plt.title(title) + + return ax, ax2 + + +**plot_precalibration_flow()** +_______________________________ + +.. code-block:: python + + def plot_precalibration_flow(df_sim, df_obs, figsize=[10, 4]): + """Plot flow discharge provided by the ensemble of parameters sets from Pre-Calibration versus the observed + flow data. 
+ + :param df_sim: Dataframe of simulated metrics + + :param df_obs: Dataframe of mean monthly observed data from sensitivity analysis + + :param figsize: Matplotlib figure size + :type figsize: list + + """ + + # set up figure + fig, ax = plt.subplots(figsize=figsize) + + # set axis labels + ax.set_xlabel('Days') + ax.set_ylabel('Flow Discharge') + + # plot pre-calibration results + for i in range(df_sim.shape[1]): + plt.plot(range(len(df_sim)), df_sim.iloc[:, i], color="lightgreen", alpha=0.2) + + # plot observed + plt.plot(range(len(df_sim)), df_obs['Strmflw'], color="black") + + plt.title('Observed vs. Pre-Calibration Outputs') + + # customize legend + custom_lines = [Line2D([0], [0], color="lightgreen", lw=4), + Line2D([0], [0], color="black", lw=4)] + plt.legend(custom_lines, ['Pre-Calibration', 'Observed']) + + return ax + + +**plot_precalibration_glue()** +_______________________________ + +.. code-block:: python + + def plot_precalibration_glue(df_precal, df_glue, df_obs, figsize=[10, 4]): + """Plot flow discharge provided by the ensemble of parameters sets from Pre-Calibration versus the observed + flow data. + + :param df_sim: Dataframe of simulated metrics + + :param df_obs: Dataframe of mean monthly observed data from sensitivity analysis + + :param figsize: Matplotlib figure size + :type figsize: list + + """ + + # set up figure + fig, ax = plt.subplots(figsize=figsize) + + # set axis labels + ax.set_xlabel('Days') + ax.set_ylabel('Flow Discharge') + + # plot pre-calibration results + for i in range(df_precal.shape[1]): + plt.plot(range(len(df_precal)), df_precal.iloc[:, i], color="lightgreen", alpha=0.2) + + # plot glue + for i in range(df_glue.shape[1]): + plt.plot(range(len(df_glue)), df_glue.iloc[:, i], color="lightblue", alpha=0.2) + + # plot observed + plt.plot(range(len(df_precal)), df_obs['Strmflw'], color="black") + + plt.title('Observed vs. Sensitivity Analysis Outputs across GLUE/Pre-Calibration') + + # customize legend + custom_lines = [Line2D([0], [0], color="lightgreen", lw=4), + Line2D([0], [0], color="lightblue", lw=4), + Line2D([0], [0], color="black", lw=4)] + plt.legend(custom_lines, ['Pre-Calibration', 'GLUE', 'Observed']) + + return ax + + +fishery_dynamics.ipynb +--------------------------------------- + +The following are the plotting functions as described in the ``fishery_dynamics.ipynb`` Jupyter notebook tutorial. + +The following are the necessary package imports to run these functions: + +.. code-block:: python + + import numpy as np + import matplotlib.pyplot as plt + + from matplotlib import patheffects as pe + + +**plot_objective_performance()** +_________________________________ + +.. 
code-block:: python + + def plot_objective_performance(objective_performance, profit_solution, robust_solution, figsize=(18, 9)): + """Plot the identified solutions with regards to their objective performance + in a parallel axis plot + + :param objective_performance: Objective performance array + :param profit_solution: Profitable solutions array + :param robust_solution: Robust solutions array + :param figsize: Figure size + :type figsize: tuple + + """ + + # create the figure object + fig = plt.figure(figsize=figsize) + + # set up subplot axis object + ax = fig.add_subplot(1, 1, 1) + + # labels where constraint is always 0 + objs_labels = ['Net present\nvalue (NPV)', + 'Prey population deficit', + 'Longest duration\nof low harvest', + 'Worst harvest instance', + 'Variance of harvest', + 'Duration of predator\npopulation collapse'] + + # normalization across objectives + mins = objective_performance.min(axis=0) + maxs = objective_performance.max(axis=0) + norm_reference = objective_performance.copy() + + for i in range(5): + mm = objective_performance[:, i].min() + mx = objective_performance[:, i].max() + if mm != mx: + norm_reference[:, i] = (objective_performance[:, i] - mm) / (mx - mm) + else: + norm_reference[:, i] = 1 + + # colormap from matplotlib + cmap = plt.cm.get_cmap("Blues") + + # plot all solutions + for i in range(len(norm_reference[:, 0])): + ys = np.append(norm_reference[i, :], 1.0) + xs = range(len(ys)) + ax.plot(xs, ys, c=cmap(ys[0]), linewidth=2) + + # to highlight robust solutions + ys = np.append(norm_reference[profit_solution, :], 1.0) # Most profitable + xs = range(len(ys)) + l1 = ax.plot(xs[0:6], + ys[0:6], + c=cmap(ys[0]), + linewidth=3, + label='Most robust in NPV', + path_effects=[pe.Stroke(linewidth=6, foreground='darkgoldenrod'), pe.Normal()]) + + ys = np.append(norm_reference[robust_solution, :], 1.0) # Most robust in all criteria + xs = range(len(ys)) + l2 = ax.plot(xs[0:6], + ys[0:6], + c=cmap(ys[0]), + linewidth=3, + label='Most robust across criteria', + path_effects=[pe.Stroke(linewidth=6, foreground='gold'), pe.Normal()]) + + # build colorbar + sm = plt.cm.ScalarMappable(cmap=cmap) + sm.set_array([objective_performance[:, 0].min(), objective_performance[:, 0].max()]) + cbar = fig.colorbar(sm) + cbar.ax.set_ylabel("\nNet present value (NPV)") + + # tick values + minvalues = ["{0:.3f}".format(mins[0]), + "{0:.3f}".format(-mins[1]), + str(-mins[2]), + "{0:.3f}".format(-mins[3]), + "{0:.2f}".format(-mins[4]), + str(0)] + + maxvalues = ["{0:.2f}".format(maxs[0]), + "{0:.3f}".format(-maxs[1]), + str(-maxs[2]), + "{0:.2f}".format(maxs[3]), + "{0:.2f}".format(-maxs[4]), + str(0)] + + ax.set_ylabel("Preference ->", size=12) + ax.set_yticks([]) + ax.set_xticks([0, 1, 2, 3, 4, 5]) + ax.set_xticklabels([minvalues[i] + '\n' + objs_labels[i] for i in range(len(objs_labels))]) + + # make a twin axis for toplabels + ax1 = ax.twiny() + ax1.set_yticks([]) + ax1.set_xticks([0, 1, 2, 3, 4, 5]) + ax1.set_xticklabels([maxvalues[i] for i in range(len(maxs) + 1)]) + + return ax, ax1 + + +**plot_factor_performance()** +_________________________________ + +.. code-block:: python + + def plot_factor_performance(param_values, collapse_days, b, m, a): + """Visualize the performance of our policies in three-dimensional + parametric space. 
+ + :param param_values: Saltelli sample array + :param collapse_days: Simulation array + :param b: b parameter boundary interval + :param m: m parameter boundary interval + :param a: a parameter boundary interval + + """ + + # set colormap + cmap = plt.cm.get_cmap("RdBu_r") + + # build figure object + fig = plt.figure(figsize=plt.figaspect(0.5), dpi=600, constrained_layout=True) + + # set up scalable colormap + sm = plt.cm.ScalarMappable(cmap=cmap) + + # set up subplot for profit maximizing policy + ax1 = fig.add_subplot(1, 2, 1, projection='3d') + + # add point data for profit plot + sows = ax1.scatter(param_values[:,1], + param_values[:,6], + param_values[:,0], + c=collapse_days[:,0], + cmap=cmap, + s=0.5) + + # add surface data for boundary separating successful and failed states of the world + pts_ineq = ax1.plot_surface(b, m, a, color='black', alpha=0.25, zorder=1) + + # add reference point to plot + pt_ref = ax1.scatter(0.5, 0.7, 0.005, c='black', s=50, zorder=0) + + # set up plot aesthetics and labels + ax1.set_xlabel("b") + ax1.set_ylabel("m") + ax1.set_zlabel("a") + ax1.set_zlim([0.0, 2.0]) + ax1.set_xlim([0.0, 1.0]) + ax1.set_ylim([0.0, 1.5]) + ax1.xaxis.set_view_interval(0, 0.5) + ax1.set_facecolor('white') + ax1.view_init(12, -17) + ax1.set_title('Profit maximizing policy') + + # set up subplot for robust policy + ax2 = fig.add_subplot(1, 2, 2, projection='3d') + + # add point data for robust plot + sows = ax2.scatter(param_values[:,1], + param_values[:,6], + param_values[:,0], + c=collapse_days[:,1], + cmap=cmap, + s=0.5) + + # add surface data for boundary separating successful and failed states of the world + pts_ineq = ax2.plot_surface(b, m, a, color='black', alpha=0.25, zorder=1) + + # add reference point to plot + pt_ref = ax2.scatter(0.5, 0.7, 0.005, c='black', s=50, zorder=0) + + # set up plot aesthetics and labels + ax2.set_xlabel("b") + ax2.set_ylabel("m") + ax2.set_zlabel("a") + ax2.set_zlim([0.0, 2.0]) + ax2.set_xlim([0.0, 1.0]) + ax2.set_ylim([0.0, 1.5]) + ax2.xaxis.set_view_interval(0, 0.5) + ax2.set_facecolor('white') + ax2.view_init(12, -17) + ax2.set_title('Robust policy') + + # set up colorbar + sm.set_array([collapse_days.min(), collapse_days.max()]) + cbar = fig.colorbar(sm) + cbar.set_label('Days with predator collapse') + + return ax1, ax2 diff --git a/dev/docs/html/_sources/R.Bibliography.rst b/dev/docs/html/_sources/R.Bibliography.rst new file mode 100644 index 0000000..60cc211 --- /dev/null +++ b/dev/docs/html/_sources/R.Bibliography.rst @@ -0,0 +1,6 @@ +============ +Bibliography +============ + +.. bibliography:: + :style: unsrt diff --git a/dev/docs/html/_sources/acknowledgements.rst b/dev/docs/html/_sources/acknowledgements.rst new file mode 100644 index 0000000..1f34009 --- /dev/null +++ b/dev/docs/html/_sources/acknowledgements.rst @@ -0,0 +1,25 @@ + +Acknowledgements +################ + +This e-book was developed by the `Integrated Multisector, Multiscale Modeling (IM3) project `_, supported by the `U.S. Department of Energy `_, `Office of Science `_, as part of research in the `MultiSector Dynamics `_, Earth and Environmental System Modeling Program. + +The authors would like to thank Casey Burleyson (PNNL) and Isaac Thompson (PNNL) for their insightful feedback on drafts of the text and tutorials. + +Open source under license `CC BY-NC-ND-4.0 `_. + +Portions of cover image by `V.Cid7413 `_, distributed under a `CC-BY-4.0 `_ license. 
+ +Open Source Disclaimer: + + This material was prepared as an account of work sponsored by an agency of the United States Government. Neither the United States Government nor the United States Department of Energy, nor Battelle, nor any of their employees, nor any jurisdiction or organization that has cooperated in the development of these materials, makes any warranty, express or implied, or assumes any legal liability or responsibility for the accuracy, completeness, or usefulness or any information, apparatus, product, software, or process disclosed, or represents that its use would not infringe privately owned rights. + Reference herein to any specific commercial product, process, or service by trade name, trademark, manufacturer, or otherwise does not necessarily constitute or imply its endorsement, recommendation, or favoring by the United States Government or any agency thereof, or Battelle Memorial Institute. The views and opinions of authors expressed herein do not necessarily state or reflect those of the United States Government or any agency thereof. + +| PACIFIC NORTHWEST NATIONAL LABORATORY +| operated by +| BATTELLE +| for the +| UNITED STATES DEPARTMENT OF ENERGY +| under Contract DE-AC05-76RL01830 + +Copyright (c) 2022-present; Battelle Memorial Institute diff --git a/dev/docs/html/_sources/citation.rst b/dev/docs/html/_sources/citation.rst new file mode 100644 index 0000000..63ee0ef --- /dev/null +++ b/dev/docs/html/_sources/citation.rst @@ -0,0 +1,29 @@ +Suggested Citation +################### + + Reed, P.M., Hadjimichael, A., Malek, K., Karimi, T., Vernon, C.R., Srikrishnan, V., Gupta, R.S., Gold, D.F., Lee, B., Keller, K., Thurber, T.B, & Rice, J.S. (2022). Addressing Uncertainty in Multisector Dynamics Research [Book]. Zenodo. https://doi.org/10.5281/zenodo.6110623 + +.. only:: html + + .. raw:: html + + \ No newline at end of file diff --git a/dev/docs/html/_sources/code_of_conduct.rst b/dev/docs/html/_sources/code_of_conduct.rst new file mode 100644 index 0000000..56672b8 --- /dev/null +++ b/dev/docs/html/_sources/code_of_conduct.rst @@ -0,0 +1,58 @@ +Code of Conduct +=============== + +Our Pledge +---------- + +We as members, contributors, and leaders pledge to make participation in our +community a harassment-free experience for everyone, regardless of age, body +size, visible or invisible disability, ethnicity, sex characteristics, gender +identity and expression, level of experience, education, socio-economic status, +nationality, personal appearance, race, religion, or sexual identity and +orientation. + +We pledge to act and interact in ways that contribute to an open, welcoming, +diverse, inclusive, and healthy community. 
+
+Our Standards
+-------------
+
+Examples of behavior that contributes to a positive environment for our
+community include:
+
+- Demonstrating empathy and kindness toward other people
+- Being respectful of differing opinions, viewpoints, and experiences
+- Giving and gracefully accepting constructive feedback
+- Accepting responsibility and apologizing to those affected by our mistakes,
+  and learning from the experience
+- Focusing on what is best not just for us as individuals, but for the overall
+  community
+
+Examples of unacceptable behavior include:
+
+- The use of sexualized language or imagery, and sexual attention or
+  advances of any kind
+- Trolling, insulting or derogatory comments, and personal or political attacks
+- Public or private harassment
+- Publishing others' private information, such as a physical or email
+  address, without their explicit permission
+- Other conduct which could reasonably be considered inappropriate in a
+  professional setting
+
+Scope
+-----
+
+This Code of Conduct applies within all community spaces, and also applies
+when an individual is officially representing the community in public spaces.
+Examples of representing our community include using an official e-mail
+address, posting via an official social media account, or acting as an appointed
+representative at an online or offline event.
+
+Attribution
+-----------
+
+This Code of Conduct is adapted from the Contributor Covenant, version 2.0,
+available at https://www.contributor-covenant.org/version/2/0/code_of_conduct.html.
+
+For answers to common questions about this code of conduct, see the FAQ at
+https://www.contributor-covenant.org/faq. Translations are available at https://www.contributor-covenant.org/translations.
diff --git a/dev/docs/html/_sources/contributing.rst b/dev/docs/html/_sources/contributing.rst
new file mode 100644
index 0000000..e1e1227
--- /dev/null
+++ b/dev/docs/html/_sources/contributing.rst
@@ -0,0 +1,18 @@
+******************
+Contribution Guide
+******************
+
+Our eBook is a living product that we hope continues to grow over time to stay relevant with methodological and technological advancements in the field of uncertainty quantification and MultiSector Dynamics at large. We extend an invitation to you, the reader, to contribute to one of our interactive Jupyter notebook tutorials. If you feel you have a contribution that would be relevant and generalizable to the MSD community, please submit a proposal idea `here `_. Your proposal will be reviewed by our team and feedback and/or a decision will be provided shortly. Any accepted contribution will receive its own DOI and citation so that you may provide an independent reference to your work as you see fit.
+
+Please consider the following requirements for contribution:
+
+- All elements of your contribution MUST be fully open-source and able to be distributed with an `Open Source Initiative approved license `_ with an understanding that they may be used in community demonstrations and other activities. Author citations will be present in the notebook for any contributed work to ensure the author(s) receive full credit for their contribution.
+- Any data or code reused in your submission must be correctly cited, giving credit to the original authors.
+- The notebook provided must be written in English and able to be a stand-alone product that needs no further explanation past what is written in the notebook to make use of it. 
+- The provided work is not merely a regurgitation of an existing tutorial or demonstration but represents a novel contribution. +- All contributions and communication thereof must abide by our `code of conduct `_. + + +If you feel your work meets the criteria above, please submit a proposal issue `here `_. If your proposal is approved, please create a pull request with the submission `template `_ copied into the pull request description and filled out. We will then review your pull request and provide feedback. Once your contribution has been deemed ready to deploy, we will generate a DOI for your work, launch it to our MSD-LIVE set of interactive notebooks, and feature the contribution in the index of our eBook. + +Please feel free to reach out with any further questions. diff --git a/dev/docs/html/_sources/examples.rst b/dev/docs/html/_sources/examples.rst new file mode 100644 index 0000000..ca7125a --- /dev/null +++ b/dev/docs/html/_sources/examples.rst @@ -0,0 +1,44 @@ +======== +Examples +======== + + +Using mathjax +=================== + +The **first-order sensitivity index** indicates the percent of model output variance contributed by a factor individually (i.e., the effect of varying *x*\ :sub:`i`\ alone) and is obtained using the following (Saltelli, 2002a; Sobol, 2001): + +.. math:: + + S_i^1 = \frac{V_{x_i} [E_{x\sim_i} (x_i)]}{V(y)} + +with *E* and *V* denoting the expected value and the variance, respectively. + +Building a codeblock +======================= + +.. code-block:: python + :linenos: + + import ebook + + ebook.plot_experimental_design() + +Testing out a note +================== + +.. note:: + Keep track of the latest in programming solutions on the `Water Programming `_ blog! + + +Insert a figure with a caption +============================== + +Be sure to add your figure into 'msd_uncertainty_ebook/docs/source/_static' + + .. figure:: _static/im3.png + :alt: IM3 logo + :width: 100px + :align: center + + This is my caption. diff --git a/dev/docs/html/_sources/index.rst b/dev/docs/html/_sources/index.rst new file mode 100644 index 0000000..170c127 --- /dev/null +++ b/dev/docs/html/_sources/index.rst @@ -0,0 +1,134 @@ +.. msd_uncertainty_ebook documentation master file, created by + sphinx-quickstart on Wed May 26 22:27:12 2021. + You can adapt this file completely to your liking, but it should at least + contain the root `toctree` directive. + + +======================================================= +Addressing Uncertainty in MultiSector Dynamics Research +======================================================= + +.. only:: html + + .. epigraph:: + + Patrick M. Reed, Antonia Hadjimichael, Keyvan Malek, + Tina Karimi, Chris R. Vernon, Vivek Srikrishnan, Rohini S. Gupta, + David F. Gold, Ben Lee, Klaus Keller, Travis B. Thurber, Jennie S. Rice + + A practical guide to sensitivity analysis and diagnostic model evaluation techniques for confronting the computational and conceptual challenges of multi-model, transdisciplinary workflows. 
+ + **Topics in the Book** + + * `Preface `_ + * `Introduction <1_introduction.html>`_ + * `An overview of diagnostic modeling and perspectives on model evaluation <2_diagnostic_modeling_overview_and_perspectives.html>`_ + * `A framework for the basic methods and concepts used in sensitivity analysis <3_sensitivity_analysis_the_basics.html>`_ + * `Technical applications supporting diagnostic model evaluation and exploration <4_sensitivity_analysis_diagnostic_and_exploratory_modeling.html>`_ + + **Interactive Tutorials** + + * `Factor Discovery `_ + * `Model Calibration `_ + * `Sobol Sensitivity Analysis `_ + * `Factor Mapping using Logistic Regression `_ + * `Time-evolving scenario discovery for infrastructure pathways `_ + * `A Hidden-Markov Modeling Approach to Creating Synthetic Streamflow Scenarios `_ + + .. tip:: + + .. raw:: html + +

+ Use the sidebar on the left to quickly navigate the eBook! +
+ Click or tap the icon to show and hide the sidebar. +

+ + .. admonition:: Info + + .. raw:: html + +

+ Report a typo or just pass along something you like about the book by opening an issue on GitHub! +
+ Click or tap the icon to find this link again. +

+ +.. raw:: latex + + \frontmatter + \sphinxmaketitle + +.. toctree:: + :hidden: + :glob: + + preface + citation + acknowledgements + code_of_conduct + contributing + +.. raw:: latex + + \sphinxtableofcontents + +.. raw:: latex + + \mainmatter + +.. toctree:: + :hidden: + :includehidden: + :numbered: 4 + :maxdepth: 4 + :caption: Contents + :name: mastertoc + :glob: + + 1_introduction + 2_diagnostic_modeling_overview_and_perspectives + 3_sensitivity_analysis_the_basics + 4_sensitivity_analysis_diagnostic_and_exploratory_modeling + 5_conclusion + +.. raw:: latex + + \appendix + +.. appendix:: + :hidden: + :numbered: 4 + :maxdepth: 4 + :caption: Appendices + :glob: + + A1_Uncertainty_Quantification + A2_Jupyter_Notebooks + A3_plotting_code + +.. raw:: latex + + \backmatter + +.. toctree:: + :hidden: + :maxdepth: 1 + :caption: Glossary + :glob: + + 6_glossary + + +.. raw:: latex + + \backmatter + +.. toctree:: + :hidden: + :maxdepth: 1 + :caption: References + :glob: + + R.Bibliography diff --git a/dev/docs/html/_sources/modules.rst b/dev/docs/html/_sources/modules.rst new file mode 100644 index 0000000..c62245d --- /dev/null +++ b/dev/docs/html/_sources/modules.rst @@ -0,0 +1,6 @@ +nanites +======= + +.. toctree:: + :maxdepth: 4 + diff --git a/dev/docs/html/_sources/preface.rst b/dev/docs/html/_sources/preface.rst new file mode 100644 index 0000000..c4415a7 --- /dev/null +++ b/dev/docs/html/_sources/preface.rst @@ -0,0 +1,34 @@ + +.. _preface: + +******* +Preface +******* + +This online book is meant to provide an open science “living” resource on uncertainty characterization methods for the MultiSector Dynamics (MSD) community and other technical communities confronting sustainability, climate, and energy transition challenges. The last decade has seen rapid growth in science efforts seeking to address the interconnected nature of these challenges across scales, sectors, and systems. Accompanying these advances is the growing realization that the deep integration of research from many disciplinary fields is non-trivial and raises important questions. How and why models are developed seems to have an obvious answer (“to gain understanding”). But what does it actually mean to gain understanding? What if a small change in a model or its data fundamentally changes our perceptions of what we thought we understood? What controls the outcomes of our model(s)? How do we understand the implications of model coupling, such as when one model is on the receiving end of several other models that are considered “input data”? + +The often quoted “All models are wrong, but some are useful.” (George Box) is a bit of a conflation trap, often used to excuse known weaknesses in complex models as just an unavoidable outcome of being a modeler. In fact, the quote actually refers to a specific class of small-scale statistical models within an application context that assures a much higher degree of understanding and data quality control than is typical for the coupled human-natural systems applications in the MSD area. Moreover, Box was actually warning readers to avoid overparameterization and emphasizing the need to better understand what underlying factors cause your model to be wrong :cite:p:`box_1976`. + +So, in short, there is a tension when attaining better performance by means of increasing the complexity of a model or model-based workflow. Box highlights that a modeler requires a clear diagnostic understanding of this performance-complexity tradeoff. 
If we move from small-scale models simulating readily-observed phenomena to the MSD context, things get quite a bit more complicated. How can we provide robust insights for unseen futures that emerge across a myriad of human and natural systems? Sometimes even asking, “what is a model?” or “what is data?” is complicated (e.g., data assimilated weather products, satellite-based signals translated through retrieval algorithms, demographic changes, resource demands, etc.). This MSD guidance text seeks to help readers navigate these challenges. It is meant to serve as an evolving resource that helps the MSD community learn how to better address uncertainty while working with complex chains of models bridging sectors, scales, and systems. It is not intended to be an exhaustive resource, but instead should be seen as a guided tour through state-of-the-science methods in uncertainty characterization, including global sensitivity analysis and exploratory modeling, to provide insights into complex human-natural systems interactions. + +To aid readers in navigating the text, the key goals for each chapter are summarized below. + +:numref:`introduction` uses the `Integrated Multisector Multiscale Modeling `_ project as a living lab to encapsulate the challenges that emerge in bridging disciplines to make consequential model-based insights while acknowledging the tremendous array of uncertainties that shape them. + +:numref:`2_diagnostic_modeling` helps the reader to better understand the importance of using diagnostic modeling to interrogate why uncertain model behaviors may emerge. The chapter also aids readers to better understand the diverse disciplinary perspectives that exist on how best to pursue consequential model-based discoveries. + +:numref:`3_sensitivity_analysis_the_basics` is a technical tools-focused primer for readers on the key elements of uncertainty characterization that includes ensemble-based design of experiments, quantitative methods for computing global sensitivities, and a summary of existing software packages. + +:numref:`4_sensitivity_analysis` narrates for readers how and why the tools from the previous chapter can be applied in a range of tasks from diagnosing model performance to formal exploratory modeling methods for making consequential model-based discoveries. + +The supplemental appendices provided in the text are also important resources for readers. They provide a glossary to help bridge terminology challenges, a brief summary of uncertainty quantification tools for more advanced readers, and a suite of Jupyter notebook tutorials that provide hands-on training tied to the contents of :numref:`3_sensitivity_analysis_the_basics` and :numref:`4_sensitivity_analysis`. + +This text was written with a number of different audiences in mind. + +Technical experts in uncertainty may find this to be a helpful and unique resource bridging a number of perspectives that have not been combined in prior books (e.g., formal model diagnostics, global sensitivity analysis, and exploratory modeling under deep uncertainty). + +Readers from different sector-specific and disciplinary-specific backgrounds can use this text to better understand potential differences and opportunities in how to make model-based insights. + +Academic or junior researchers can utilize this freely available text for training and teaching resources that include hands-on coding experiences. 
+ +This text itself represents our strong commitment to open science and will evolve as a living resource as the communities of researchers provide feedback, innovations, and future tools. diff --git a/dev/docs/html/_static/1-lede_pix_-_usbr-powell-8517152024_aa66437c2e_o-cropped.jpg b/dev/docs/html/_static/1-lede_pix_-_usbr-powell-8517152024_aa66437c2e_o-cropped.jpg new file mode 100644 index 0000000..9568f79 Binary files /dev/null and b/dev/docs/html/_static/1-lede_pix_-_usbr-powell-8517152024_aa66437c2e_o-cropped.jpg differ diff --git a/dev/docs/html/_static/Figure_1.png b/dev/docs/html/_static/Figure_1.png new file mode 100644 index 0000000..d7cbbb4 Binary files /dev/null and b/dev/docs/html/_static/Figure_1.png differ diff --git a/dev/docs/html/_static/HMM_example.png b/dev/docs/html/_static/HMM_example.png new file mode 100644 index 0000000..38d2bd1 Binary files /dev/null and b/dev/docs/html/_static/HMM_example.png differ diff --git a/dev/docs/html/_static/Map_small.png b/dev/docs/html/_static/Map_small.png new file mode 100644 index 0000000..392c962 Binary files /dev/null and b/dev/docs/html/_static/Map_small.png differ diff --git a/dev/docs/html/_static/Policy_MonteCarlo_Pathways_small.png b/dev/docs/html/_static/Policy_MonteCarlo_Pathways_small.png new file mode 100644 index 0000000..5879e4c Binary files /dev/null and b/dev/docs/html/_static/Policy_MonteCarlo_Pathways_small.png differ diff --git a/dev/docs/html/_static/PolicyandPathways_small.png b/dev/docs/html/_static/PolicyandPathways_small.png new file mode 100644 index 0000000..53fe2f3 Binary files /dev/null and b/dev/docs/html/_static/PolicyandPathways_small.png differ diff --git a/dev/docs/html/_static/basic.css b/dev/docs/html/_static/basic.css new file mode 100644 index 0000000..2af6139 --- /dev/null +++ b/dev/docs/html/_static/basic.css @@ -0,0 +1,925 @@ +/* + * basic.css + * ~~~~~~~~~ + * + * Sphinx stylesheet -- basic theme. + * + * :copyright: Copyright 2007-2024 by the Sphinx team, see AUTHORS. + * :license: BSD, see LICENSE for details. 
+ * + */ + +/* -- main layout ----------------------------------------------------------- */ + +div.clearer { + clear: both; +} + +div.section::after { + display: block; + content: ''; + clear: left; +} + +/* -- relbar ---------------------------------------------------------------- */ + +div.related { + width: 100%; + font-size: 90%; +} + +div.related h3 { + display: none; +} + +div.related ul { + margin: 0; + padding: 0 0 0 10px; + list-style: none; +} + +div.related li { + display: inline; +} + +div.related li.right { + float: right; + margin-right: 5px; +} + +/* -- sidebar --------------------------------------------------------------- */ + +div.sphinxsidebarwrapper { + padding: 10px 5px 0 10px; +} + +div.sphinxsidebar { + float: left; + width: 270px; + margin-left: -100%; + font-size: 90%; + word-wrap: break-word; + overflow-wrap : break-word; +} + +div.sphinxsidebar ul { + list-style: none; +} + +div.sphinxsidebar ul ul, +div.sphinxsidebar ul.want-points { + margin-left: 20px; + list-style: square; +} + +div.sphinxsidebar ul ul { + margin-top: 0; + margin-bottom: 0; +} + +div.sphinxsidebar form { + margin-top: 10px; +} + +div.sphinxsidebar input { + border: 1px solid #98dbcc; + font-family: sans-serif; + font-size: 1em; +} + +div.sphinxsidebar #searchbox form.search { + overflow: hidden; +} + +div.sphinxsidebar #searchbox input[type="text"] { + float: left; + width: 80%; + padding: 0.25em; + box-sizing: border-box; +} + +div.sphinxsidebar #searchbox input[type="submit"] { + float: left; + width: 20%; + border-left: none; + padding: 0.25em; + box-sizing: border-box; +} + + +img { + border: 0; + max-width: 100%; +} + +/* -- search page ----------------------------------------------------------- */ + +ul.search { + margin: 10px 0 0 20px; + padding: 0; +} + +ul.search li { + padding: 5px 0 5px 20px; + background-image: url(file.png); + background-repeat: no-repeat; + background-position: 0 7px; +} + +ul.search li a { + font-weight: bold; +} + +ul.search li p.context { + color: #888; + margin: 2px 0 0 30px; + text-align: left; +} + +ul.keywordmatches li.goodmatch a { + font-weight: bold; +} + +/* -- index page ------------------------------------------------------------ */ + +table.contentstable { + width: 90%; + margin-left: auto; + margin-right: auto; +} + +table.contentstable p.biglink { + line-height: 150%; +} + +a.biglink { + font-size: 1.3em; +} + +span.linkdescr { + font-style: italic; + padding-top: 5px; + font-size: 90%; +} + +/* -- general index --------------------------------------------------------- */ + +table.indextable { + width: 100%; +} + +table.indextable td { + text-align: left; + vertical-align: top; +} + +table.indextable ul { + margin-top: 0; + margin-bottom: 0; + list-style-type: none; +} + +table.indextable > tbody > tr > td > ul { + padding-left: 0em; +} + +table.indextable tr.pcap { + height: 10px; +} + +table.indextable tr.cap { + margin-top: 10px; + background-color: #f2f2f2; +} + +img.toggler { + margin-right: 3px; + margin-top: 3px; + cursor: pointer; +} + +div.modindex-jumpbox { + border-top: 1px solid #ddd; + border-bottom: 1px solid #ddd; + margin: 1em 0 1em 0; + padding: 0.4em; +} + +div.genindex-jumpbox { + border-top: 1px solid #ddd; + border-bottom: 1px solid #ddd; + margin: 1em 0 1em 0; + padding: 0.4em; +} + +/* -- domain module index --------------------------------------------------- */ + +table.modindextable td { + padding: 2px; + border-collapse: collapse; +} + +/* -- general body styles --------------------------------------------------- */ + 
+div.body { + min-width: 360px; + max-width: 800px; +} + +div.body p, div.body dd, div.body li, div.body blockquote { + -moz-hyphens: auto; + -ms-hyphens: auto; + -webkit-hyphens: auto; + hyphens: auto; +} + +a.headerlink { + visibility: hidden; +} + +a:visited { + color: #551A8B; +} + +h1:hover > a.headerlink, +h2:hover > a.headerlink, +h3:hover > a.headerlink, +h4:hover > a.headerlink, +h5:hover > a.headerlink, +h6:hover > a.headerlink, +dt:hover > a.headerlink, +caption:hover > a.headerlink, +p.caption:hover > a.headerlink, +div.code-block-caption:hover > a.headerlink { + visibility: visible; +} + +div.body p.caption { + text-align: inherit; +} + +div.body td { + text-align: left; +} + +.first { + margin-top: 0 !important; +} + +p.rubric { + margin-top: 30px; + font-weight: bold; +} + +img.align-left, figure.align-left, .figure.align-left, object.align-left { + clear: left; + float: left; + margin-right: 1em; +} + +img.align-right, figure.align-right, .figure.align-right, object.align-right { + clear: right; + float: right; + margin-left: 1em; +} + +img.align-center, figure.align-center, .figure.align-center, object.align-center { + display: block; + margin-left: auto; + margin-right: auto; +} + +img.align-default, figure.align-default, .figure.align-default { + display: block; + margin-left: auto; + margin-right: auto; +} + +.align-left { + text-align: left; +} + +.align-center { + text-align: center; +} + +.align-default { + text-align: center; +} + +.align-right { + text-align: right; +} + +/* -- sidebars -------------------------------------------------------------- */ + +div.sidebar, +aside.sidebar { + margin: 0 0 0.5em 1em; + border: 1px solid #ddb; + padding: 7px; + background-color: #ffe; + width: 40%; + float: right; + clear: right; + overflow-x: auto; +} + +p.sidebar-title { + font-weight: bold; +} + +nav.contents, +aside.topic, +div.admonition, div.topic, blockquote { + clear: left; +} + +/* -- topics ---------------------------------------------------------------- */ + +nav.contents, +aside.topic, +div.topic { + border: 1px solid #ccc; + padding: 7px; + margin: 10px 0 10px 0; +} + +p.topic-title { + font-size: 1.1em; + font-weight: bold; + margin-top: 10px; +} + +/* -- admonitions ----------------------------------------------------------- */ + +div.admonition { + margin-top: 10px; + margin-bottom: 10px; + padding: 7px; +} + +div.admonition dt { + font-weight: bold; +} + +p.admonition-title { + margin: 0px 10px 5px 0px; + font-weight: bold; +} + +div.body p.centered { + text-align: center; + margin-top: 25px; +} + +/* -- content of sidebars/topics/admonitions -------------------------------- */ + +div.sidebar > :last-child, +aside.sidebar > :last-child, +nav.contents > :last-child, +aside.topic > :last-child, +div.topic > :last-child, +div.admonition > :last-child { + margin-bottom: 0; +} + +div.sidebar::after, +aside.sidebar::after, +nav.contents::after, +aside.topic::after, +div.topic::after, +div.admonition::after, +blockquote::after { + display: block; + content: ''; + clear: both; +} + +/* -- tables ---------------------------------------------------------------- */ + +table.docutils { + margin-top: 10px; + margin-bottom: 10px; + border: 0; + border-collapse: collapse; +} + +table.align-center { + margin-left: auto; + margin-right: auto; +} + +table.align-default { + margin-left: auto; + margin-right: auto; +} + +table caption span.caption-number { + font-style: italic; +} + +table caption span.caption-text { +} + +table.docutils td, table.docutils th { + padding: 1px 
8px 1px 5px; + border-top: 0; + border-left: 0; + border-right: 0; + border-bottom: 1px solid #aaa; +} + +th { + text-align: left; + padding-right: 5px; +} + +table.citation { + border-left: solid 1px gray; + margin-left: 1px; +} + +table.citation td { + border-bottom: none; +} + +th > :first-child, +td > :first-child { + margin-top: 0px; +} + +th > :last-child, +td > :last-child { + margin-bottom: 0px; +} + +/* -- figures --------------------------------------------------------------- */ + +div.figure, figure { + margin: 0.5em; + padding: 0.5em; +} + +div.figure p.caption, figcaption { + padding: 0.3em; +} + +div.figure p.caption span.caption-number, +figcaption span.caption-number { + font-style: italic; +} + +div.figure p.caption span.caption-text, +figcaption span.caption-text { +} + +/* -- field list styles ----------------------------------------------------- */ + +table.field-list td, table.field-list th { + border: 0 !important; +} + +.field-list ul { + margin: 0; + padding-left: 1em; +} + +.field-list p { + margin: 0; +} + +.field-name { + -moz-hyphens: manual; + -ms-hyphens: manual; + -webkit-hyphens: manual; + hyphens: manual; +} + +/* -- hlist styles ---------------------------------------------------------- */ + +table.hlist { + margin: 1em 0; +} + +table.hlist td { + vertical-align: top; +} + +/* -- object description styles --------------------------------------------- */ + +.sig { + font-family: 'Consolas', 'Menlo', 'DejaVu Sans Mono', 'Bitstream Vera Sans Mono', monospace; +} + +.sig-name, code.descname { + background-color: transparent; + font-weight: bold; +} + +.sig-name { + font-size: 1.1em; +} + +code.descname { + font-size: 1.2em; +} + +.sig-prename, code.descclassname { + background-color: transparent; +} + +.optional { + font-size: 1.3em; +} + +.sig-paren { + font-size: larger; +} + +.sig-param.n { + font-style: italic; +} + +/* C++ specific styling */ + +.sig-inline.c-texpr, +.sig-inline.cpp-texpr { + font-family: unset; +} + +.sig.c .k, .sig.c .kt, +.sig.cpp .k, .sig.cpp .kt { + color: #0033B3; +} + +.sig.c .m, +.sig.cpp .m { + color: #1750EB; +} + +.sig.c .s, .sig.c .sc, +.sig.cpp .s, .sig.cpp .sc { + color: #067D17; +} + + +/* -- other body styles ----------------------------------------------------- */ + +ol.arabic { + list-style: decimal; +} + +ol.loweralpha { + list-style: lower-alpha; +} + +ol.upperalpha { + list-style: upper-alpha; +} + +ol.lowerroman { + list-style: lower-roman; +} + +ol.upperroman { + list-style: upper-roman; +} + +:not(li) > ol > li:first-child > :first-child, +:not(li) > ul > li:first-child > :first-child { + margin-top: 0px; +} + +:not(li) > ol > li:last-child > :last-child, +:not(li) > ul > li:last-child > :last-child { + margin-bottom: 0px; +} + +ol.simple ol p, +ol.simple ul p, +ul.simple ol p, +ul.simple ul p { + margin-top: 0; +} + +ol.simple > li:not(:first-child) > p, +ul.simple > li:not(:first-child) > p { + margin-top: 0; +} + +ol.simple p, +ul.simple p { + margin-bottom: 0; +} + +aside.footnote > span, +div.citation > span { + float: left; +} +aside.footnote > span:last-of-type, +div.citation > span:last-of-type { + padding-right: 0.5em; +} +aside.footnote > p { + margin-left: 2em; +} +div.citation > p { + margin-left: 4em; +} +aside.footnote > p:last-of-type, +div.citation > p:last-of-type { + margin-bottom: 0em; +} +aside.footnote > p:last-of-type:after, +div.citation > p:last-of-type:after { + content: ""; + clear: both; +} + +dl.field-list { + display: grid; + grid-template-columns: fit-content(30%) auto; +} + 
+dl.field-list > dt { + font-weight: bold; + word-break: break-word; + padding-left: 0.5em; + padding-right: 5px; +} + +dl.field-list > dd { + padding-left: 0.5em; + margin-top: 0em; + margin-left: 0em; + margin-bottom: 0em; +} + +dl { + margin-bottom: 15px; +} + +dd > :first-child { + margin-top: 0px; +} + +dd ul, dd table { + margin-bottom: 10px; +} + +dd { + margin-top: 3px; + margin-bottom: 10px; + margin-left: 30px; +} + +.sig dd { + margin-top: 0px; + margin-bottom: 0px; +} + +.sig dl { + margin-top: 0px; + margin-bottom: 0px; +} + +dl > dd:last-child, +dl > dd:last-child > :last-child { + margin-bottom: 0; +} + +dt:target, span.highlighted { + background-color: #fbe54e; +} + +rect.highlighted { + fill: #fbe54e; +} + +dl.glossary dt { + font-weight: bold; + font-size: 1.1em; +} + +.versionmodified { + font-style: italic; +} + +.system-message { + background-color: #fda; + padding: 5px; + border: 3px solid red; +} + +.footnote:target { + background-color: #ffa; +} + +.line-block { + display: block; + margin-top: 1em; + margin-bottom: 1em; +} + +.line-block .line-block { + margin-top: 0; + margin-bottom: 0; + margin-left: 1.5em; +} + +.guilabel, .menuselection { + font-family: sans-serif; +} + +.accelerator { + text-decoration: underline; +} + +.classifier { + font-style: oblique; +} + +.classifier:before { + font-style: normal; + margin: 0 0.5em; + content: ":"; + display: inline-block; +} + +abbr, acronym { + border-bottom: dotted 1px; + cursor: help; +} + +.translated { + background-color: rgba(207, 255, 207, 0.2) +} + +.untranslated { + background-color: rgba(255, 207, 207, 0.2) +} + +/* -- code displays --------------------------------------------------------- */ + +pre { + overflow: auto; + overflow-y: hidden; /* fixes display issues on Chrome browsers */ +} + +pre, div[class*="highlight-"] { + clear: both; +} + +span.pre { + -moz-hyphens: none; + -ms-hyphens: none; + -webkit-hyphens: none; + hyphens: none; + white-space: nowrap; +} + +div[class*="highlight-"] { + margin: 1em 0; +} + +td.linenos pre { + border: 0; + background-color: transparent; + color: #aaa; +} + +table.highlighttable { + display: block; +} + +table.highlighttable tbody { + display: block; +} + +table.highlighttable tr { + display: flex; +} + +table.highlighttable td { + margin: 0; + padding: 0; +} + +table.highlighttable td.linenos { + padding-right: 0.5em; +} + +table.highlighttable td.code { + flex: 1; + overflow: hidden; +} + +.highlight .hll { + display: block; +} + +div.highlight pre, +table.highlighttable pre { + margin: 0; +} + +div.code-block-caption + div { + margin-top: 0; +} + +div.code-block-caption { + margin-top: 1em; + padding: 2px 5px; + font-size: small; +} + +div.code-block-caption code { + background-color: transparent; +} + +table.highlighttable td.linenos, +span.linenos, +div.highlight span.gp { /* gp: Generic.Prompt */ + user-select: none; + -webkit-user-select: text; /* Safari fallback only */ + -webkit-user-select: none; /* Chrome/Safari */ + -moz-user-select: none; /* Firefox */ + -ms-user-select: none; /* IE10+ */ +} + +div.code-block-caption span.caption-number { + padding: 0.1em 0.3em; + font-style: italic; +} + +div.code-block-caption span.caption-text { +} + +div.literal-block-wrapper { + margin: 1em 0; +} + +code.xref, a code { + background-color: transparent; + font-weight: bold; +} + +h1 code, h2 code, h3 code, h4 code, h5 code, h6 code { + background-color: transparent; +} + +.viewcode-link { + float: right; +} + +.viewcode-back { + float: right; + font-family: sans-serif; +} 
+ +div.viewcode-block:target { + margin: -1px -10px; + padding: 0 10px; +} + +/* -- math display ---------------------------------------------------------- */ + +img.math { + vertical-align: middle; +} + +div.body div.math p { + text-align: center; +} + +span.eqno { + float: right; +} + +span.eqno a.headerlink { + position: absolute; + z-index: 1; +} + +div.math:hover a.headerlink { + visibility: visible; +} + +/* -- printout stylesheet --------------------------------------------------- */ + +@media print { + div.document, + div.documentwrapper, + div.bodywrapper { + margin: 0 !important; + width: 100%; + } + + div.sphinxsidebar, + div.related, + div.footer, + #top-link { + display: none; + } +} \ No newline at end of file diff --git a/dev/docs/html/_static/basin_map.png b/dev/docs/html/_static/basin_map.png new file mode 100644 index 0000000..5cb0dfe Binary files /dev/null and b/dev/docs/html/_static/basin_map.png differ diff --git a/dev/docs/html/_static/custom.css b/dev/docs/html/_static/custom.css new file mode 100644 index 0000000..9cfb4d4 --- /dev/null +++ b/dev/docs/html/_static/custom.css @@ -0,0 +1,3 @@ +h1#site-title.site-logo { + text-align: left; +} \ No newline at end of file diff --git a/dev/docs/html/_static/custom.js b/dev/docs/html/_static/custom.js new file mode 100644 index 0000000..86a81e1 --- /dev/null +++ b/dev/docs/html/_static/custom.js @@ -0,0 +1,4 @@ +$(document).ready(function () { + $('a.repository-button').attr('target', '_blank'); + $('a.issues-button').attr('target', '_blank').attr('href', 'https://github.com/IMMM-SFA/msd_uncertainty_ebook/issues/new?assignees=thurber%2C+crvernon&labels=documentation%2C+triage&template=custom.md&title=Publication+Feedback'); +}); \ No newline at end of file diff --git a/dev/docs/html/_static/discovery_12_0.png b/dev/docs/html/_static/discovery_12_0.png new file mode 100644 index 0000000..9dbf81c Binary files /dev/null and b/dev/docs/html/_static/discovery_12_0.png differ diff --git a/dev/docs/html/_static/discovery_16_1.png b/dev/docs/html/_static/discovery_16_1.png new file mode 100644 index 0000000..9389050 Binary files /dev/null and b/dev/docs/html/_static/discovery_16_1.png differ diff --git a/dev/docs/html/_static/discovery_4_1.png b/dev/docs/html/_static/discovery_4_1.png new file mode 100644 index 0000000..e11e798 Binary files /dev/null and b/dev/docs/html/_static/discovery_4_1.png differ diff --git a/dev/docs/html/_static/discovery_6_0.png b/dev/docs/html/_static/discovery_6_0.png new file mode 100644 index 0000000..80c9bbd Binary files /dev/null and b/dev/docs/html/_static/discovery_6_0.png differ diff --git a/dev/docs/html/_static/discovery_6_1.png b/dev/docs/html/_static/discovery_6_1.png new file mode 100644 index 0000000..2b4c435 Binary files /dev/null and b/dev/docs/html/_static/discovery_6_1.png differ diff --git a/dev/docs/html/_static/discovery_9_1.png b/dev/docs/html/_static/discovery_9_1.png new file mode 100644 index 0000000..2fbbe91 Binary files /dev/null and b/dev/docs/html/_static/discovery_9_1.png differ diff --git a/dev/docs/html/_static/discovery_9_2.png b/dev/docs/html/_static/discovery_9_2.png new file mode 100644 index 0000000..4dc4c74 Binary files /dev/null and b/dev/docs/html/_static/discovery_9_2.png differ diff --git a/dev/docs/html/_static/doctools.js b/dev/docs/html/_static/doctools.js new file mode 100644 index 0000000..4d67807 --- /dev/null +++ b/dev/docs/html/_static/doctools.js @@ -0,0 +1,156 @@ +/* + * doctools.js + * ~~~~~~~~~~~ + * + * Base JavaScript utilities for all Sphinx HTML 
documentation. + * + * :copyright: Copyright 2007-2024 by the Sphinx team, see AUTHORS. + * :license: BSD, see LICENSE for details. + * + */ +"use strict"; + +const BLACKLISTED_KEY_CONTROL_ELEMENTS = new Set([ + "TEXTAREA", + "INPUT", + "SELECT", + "BUTTON", +]); + +const _ready = (callback) => { + if (document.readyState !== "loading") { + callback(); + } else { + document.addEventListener("DOMContentLoaded", callback); + } +}; + +/** + * Small JavaScript module for the documentation. + */ +const Documentation = { + init: () => { + Documentation.initDomainIndexTable(); + Documentation.initOnKeyListeners(); + }, + + /** + * i18n support + */ + TRANSLATIONS: {}, + PLURAL_EXPR: (n) => (n === 1 ? 0 : 1), + LOCALE: "unknown", + + // gettext and ngettext don't access this so that the functions + // can safely bound to a different name (_ = Documentation.gettext) + gettext: (string) => { + const translated = Documentation.TRANSLATIONS[string]; + switch (typeof translated) { + case "undefined": + return string; // no translation + case "string": + return translated; // translation exists + default: + return translated[0]; // (singular, plural) translation tuple exists + } + }, + + ngettext: (singular, plural, n) => { + const translated = Documentation.TRANSLATIONS[singular]; + if (typeof translated !== "undefined") + return translated[Documentation.PLURAL_EXPR(n)]; + return n === 1 ? singular : plural; + }, + + addTranslations: (catalog) => { + Object.assign(Documentation.TRANSLATIONS, catalog.messages); + Documentation.PLURAL_EXPR = new Function( + "n", + `return (${catalog.plural_expr})` + ); + Documentation.LOCALE = catalog.locale; + }, + + /** + * helper function to focus on search bar + */ + focusSearchBar: () => { + document.querySelectorAll("input[name=q]")[0]?.focus(); + }, + + /** + * Initialise the domain index toggle buttons + */ + initDomainIndexTable: () => { + const toggler = (el) => { + const idNumber = el.id.substr(7); + const toggledRows = document.querySelectorAll(`tr.cg-${idNumber}`); + if (el.src.substr(-9) === "minus.png") { + el.src = `${el.src.substr(0, el.src.length - 9)}plus.png`; + toggledRows.forEach((el) => (el.style.display = "none")); + } else { + el.src = `${el.src.substr(0, el.src.length - 8)}minus.png`; + toggledRows.forEach((el) => (el.style.display = "")); + } + }; + + const togglerElements = document.querySelectorAll("img.toggler"); + togglerElements.forEach((el) => + el.addEventListener("click", (event) => toggler(event.currentTarget)) + ); + togglerElements.forEach((el) => (el.style.display = "")); + if (DOCUMENTATION_OPTIONS.COLLAPSE_INDEX) togglerElements.forEach(toggler); + }, + + initOnKeyListeners: () => { + // only install a listener if it is really needed + if ( + !DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS && + !DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS + ) + return; + + document.addEventListener("keydown", (event) => { + // bail for input elements + if (BLACKLISTED_KEY_CONTROL_ELEMENTS.has(document.activeElement.tagName)) return; + // bail with special keys + if (event.altKey || event.ctrlKey || event.metaKey) return; + + if (!event.shiftKey) { + switch (event.key) { + case "ArrowLeft": + if (!DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS) break; + + const prevLink = document.querySelector('link[rel="prev"]'); + if (prevLink && prevLink.href) { + window.location.href = prevLink.href; + event.preventDefault(); + } + break; + case "ArrowRight": + if (!DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS) break; + + const nextLink = 
document.querySelector('link[rel="next"]'); + if (nextLink && nextLink.href) { + window.location.href = nextLink.href; + event.preventDefault(); + } + break; + } + } + + // some keyboard layouts may need Shift to get / + switch (event.key) { + case "/": + if (!DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS) break; + Documentation.focusSearchBar(); + event.preventDefault(); + } + }); + }, +}; + +// quick alias for translations +const _ = Documentation.gettext; + +_ready(Documentation.init); diff --git a/dev/docs/html/_static/documentation_options.js b/dev/docs/html/_static/documentation_options.js new file mode 100644 index 0000000..dab586c --- /dev/null +++ b/dev/docs/html/_static/documentation_options.js @@ -0,0 +1,13 @@ +const DOCUMENTATION_OPTIONS = { + VERSION: '', + LANGUAGE: 'en', + COLLAPSE_INDEX: false, + BUILDER: 'html', + FILE_SUFFIX: '.html', + LINK_SUFFIX: '.html', + HAS_SOURCE: true, + SOURCELINK_SUFFIX: '', + NAVIGATION_WITH_KEYS: false, + SHOW_SEARCH_SUMMARY: true, + ENABLE_SEARCH_SHORTCUTS: true, +}; \ No newline at end of file diff --git a/dev/docs/html/_static/eqn2.png b/dev/docs/html/_static/eqn2.png new file mode 100644 index 0000000..881e6da Binary files /dev/null and b/dev/docs/html/_static/eqn2.png differ diff --git a/dev/docs/html/_static/eqn4.png b/dev/docs/html/_static/eqn4.png new file mode 100644 index 0000000..2e6d6b1 Binary files /dev/null and b/dev/docs/html/_static/eqn4.png differ diff --git a/dev/docs/html/_static/figure14lake_problem_SD.png b/dev/docs/html/_static/figure14lake_problem_SD.png new file mode 100644 index 0000000..0045f50 Binary files /dev/null and b/dev/docs/html/_static/figure14lake_problem_SD.png differ diff --git a/dev/docs/html/_static/figure1_1_state_of_the_science.png b/dev/docs/html/_static/figure1_1_state_of_the_science.png new file mode 100644 index 0000000..703b1f6 Binary files /dev/null and b/dev/docs/html/_static/figure1_1_state_of_the_science.png differ diff --git a/dev/docs/html/_static/figure2_1_idealized_uc.png b/dev/docs/html/_static/figure2_1_idealized_uc.png new file mode 100644 index 0000000..17aacf1 Binary files /dev/null and b/dev/docs/html/_static/figure2_1_idealized_uc.png differ diff --git a/dev/docs/html/_static/figure3_1_global_versus_local.png b/dev/docs/html/_static/figure3_1_global_versus_local.png new file mode 100644 index 0000000..ea57348 Binary files /dev/null and b/dev/docs/html/_static/figure3_1_global_versus_local.png differ diff --git a/dev/docs/html/_static/figure3_2_factor_mapping.png b/dev/docs/html/_static/figure3_2_factor_mapping.png new file mode 100644 index 0000000..3c0d4dd Binary files /dev/null and b/dev/docs/html/_static/figure3_2_factor_mapping.png differ diff --git a/dev/docs/html/_static/figure3_3_alternative_designs.png b/dev/docs/html/_static/figure3_3_alternative_designs.png new file mode 100644 index 0000000..85bd6f2 Binary files /dev/null and b/dev/docs/html/_static/figure3_3_alternative_designs.png differ diff --git a/dev/docs/html/_static/figure3_4_morris_method.png b/dev/docs/html/_static/figure3_4_morris_method.png new file mode 100644 index 0000000..1a914b0 Binary files /dev/null and b/dev/docs/html/_static/figure3_4_morris_method.png differ diff --git a/dev/docs/html/_static/figure3_5classificationofmethods.png b/dev/docs/html/_static/figure3_5classificationofmethods.png new file mode 100644 index 0000000..af95502 Binary files /dev/null and b/dev/docs/html/_static/figure3_5classificationofmethods.png differ diff --git a/dev/docs/html/_static/figure3_6_softwaretoolkits.png 
b/dev/docs/html/_static/figure3_6_softwaretoolkits.png new file mode 100644 index 0000000..2903cec Binary files /dev/null and b/dev/docs/html/_static/figure3_6_softwaretoolkits.png differ diff --git a/dev/docs/html/_static/figure4_1_diagnostic_workflow.png b/dev/docs/html/_static/figure4_1_diagnostic_workflow.png new file mode 100644 index 0000000..aa83f53 Binary files /dev/null and b/dev/docs/html/_static/figure4_1_diagnostic_workflow.png differ diff --git a/dev/docs/html/_static/figure4_2_behavior_modes.png b/dev/docs/html/_static/figure4_2_behavior_modes.png new file mode 100644 index 0000000..11fec22 Binary files /dev/null and b/dev/docs/html/_static/figure4_2_behavior_modes.png differ diff --git a/dev/docs/html/_static/figure4_3_lake_problem_fluxes.png b/dev/docs/html/_static/figure4_3_lake_problem_fluxes.png new file mode 100644 index 0000000..3df337e Binary files /dev/null and b/dev/docs/html/_static/figure4_3_lake_problem_fluxes.png differ diff --git a/dev/docs/html/_static/figure4_4_exploratory_workflow.png b/dev/docs/html/_static/figure4_4_exploratory_workflow.png new file mode 100644 index 0000000..3820f4b Binary files /dev/null and b/dev/docs/html/_static/figure4_4_exploratory_workflow.png differ diff --git a/dev/docs/html/_static/figure4_factor_mapping.png b/dev/docs/html/_static/figure4_factor_mapping.png new file mode 100644 index 0000000..2d1e374 Binary files /dev/null and b/dev/docs/html/_static/figure4_factor_mapping.png differ diff --git a/dev/docs/html/_static/figure5_alternative_designs.png b/dev/docs/html/_static/figure5_alternative_designs.png new file mode 100644 index 0000000..d5962a1 Binary files /dev/null and b/dev/docs/html/_static/figure5_alternative_designs.png differ diff --git a/dev/docs/html/_static/figureA1_1_UQ_approaches.png b/dev/docs/html/_static/figureA1_1_UQ_approaches.png new file mode 100644 index 0000000..8828f51 Binary files /dev/null and b/dev/docs/html/_static/figureA1_1_UQ_approaches.png differ diff --git a/dev/docs/html/_static/figureA1_2_bootstrap_workflow.png b/dev/docs/html/_static/figureA1_2_bootstrap_workflow.png new file mode 100644 index 0000000..7f4c970 Binary files /dev/null and b/dev/docs/html/_static/figureA1_2_bootstrap_workflow.png differ diff --git a/dev/docs/html/_static/figureA1_3_precal_workflow.png b/dev/docs/html/_static/figureA1_3_precal_workflow.png new file mode 100644 index 0000000..afca549 Binary files /dev/null and b/dev/docs/html/_static/figureA1_3_precal_workflow.png differ diff --git a/dev/docs/html/_static/figureA1_4_mcmc_workflow.png b/dev/docs/html/_static/figureA1_4_mcmc_workflow.png new file mode 100644 index 0000000..2f54a0f Binary files /dev/null and b/dev/docs/html/_static/figureA1_4_mcmc_workflow.png differ diff --git a/dev/docs/html/_static/figureA1_5_priors_posteriors.png b/dev/docs/html/_static/figureA1_5_priors_posteriors.png new file mode 100644 index 0000000..455edfd Binary files /dev/null and b/dev/docs/html/_static/figureA1_5_priors_posteriors.png differ diff --git a/dev/docs/html/_static/file.png b/dev/docs/html/_static/file.png new file mode 100644 index 0000000..a858a41 Binary files /dev/null and b/dev/docs/html/_static/file.png differ diff --git a/dev/docs/html/_static/fishery_output_22_0.png b/dev/docs/html/_static/fishery_output_22_0.png new file mode 100644 index 0000000..127183f Binary files /dev/null and b/dev/docs/html/_static/fishery_output_22_0.png differ diff --git a/dev/docs/html/_static/fishery_output_6_0.png b/dev/docs/html/_static/fishery_output_6_0.png new file mode 100644 index 
0000000..4588dd5 Binary files /dev/null and b/dev/docs/html/_static/fishery_output_6_0.png differ diff --git a/dev/docs/html/_static/hmm_11_0.png b/dev/docs/html/_static/hmm_11_0.png new file mode 100644 index 0000000..d9d8ab2 Binary files /dev/null and b/dev/docs/html/_static/hmm_11_0.png differ diff --git a/dev/docs/html/_static/hmm_14_0.png b/dev/docs/html/_static/hmm_14_0.png new file mode 100644 index 0000000..dec4196 Binary files /dev/null and b/dev/docs/html/_static/hmm_14_0.png differ diff --git a/dev/docs/html/_static/hmm_21_0.png b/dev/docs/html/_static/hmm_21_0.png new file mode 100644 index 0000000..c0e87ca Binary files /dev/null and b/dev/docs/html/_static/hmm_21_0.png differ diff --git a/dev/docs/html/_static/hmm_25_0.png b/dev/docs/html/_static/hmm_25_0.png new file mode 100644 index 0000000..e27f714 Binary files /dev/null and b/dev/docs/html/_static/hmm_25_0.png differ diff --git a/dev/docs/html/_static/hmm_28_0.png b/dev/docs/html/_static/hmm_28_0.png new file mode 100644 index 0000000..3581c82 Binary files /dev/null and b/dev/docs/html/_static/hmm_28_0.png differ diff --git a/dev/docs/html/_static/hmm_40_0.png b/dev/docs/html/_static/hmm_40_0.png new file mode 100644 index 0000000..207cb08 Binary files /dev/null and b/dev/docs/html/_static/hmm_40_0.png differ diff --git a/dev/docs/html/_static/hmm_43_0.png b/dev/docs/html/_static/hmm_43_0.png new file mode 100644 index 0000000..e01bd7a Binary files /dev/null and b/dev/docs/html/_static/hmm_43_0.png differ diff --git a/dev/docs/html/_static/hmm_53_0.png b/dev/docs/html/_static/hmm_53_0.png new file mode 100644 index 0000000..9d1004c Binary files /dev/null and b/dev/docs/html/_static/hmm_53_0.png differ diff --git a/dev/docs/html/_static/hmm_9_0.png b/dev/docs/html/_static/hmm_9_0.png new file mode 100644 index 0000000..518a949 Binary files /dev/null and b/dev/docs/html/_static/hmm_9_0.png differ diff --git a/dev/docs/html/_static/hymod.png b/dev/docs/html/_static/hymod.png new file mode 100644 index 0000000..79bf93e Binary files /dev/null and b/dev/docs/html/_static/hymod.png differ diff --git a/dev/docs/html/_static/hymod1.png b/dev/docs/html/_static/hymod1.png new file mode 100644 index 0000000..e5c5629 Binary files /dev/null and b/dev/docs/html/_static/hymod1.png differ diff --git a/dev/docs/html/_static/hymod10.png b/dev/docs/html/_static/hymod10.png new file mode 100644 index 0000000..9c48b8f Binary files /dev/null and b/dev/docs/html/_static/hymod10.png differ diff --git a/dev/docs/html/_static/hymod11.png b/dev/docs/html/_static/hymod11.png new file mode 100644 index 0000000..4e67b3e Binary files /dev/null and b/dev/docs/html/_static/hymod11.png differ diff --git a/dev/docs/html/_static/hymod12.png b/dev/docs/html/_static/hymod12.png new file mode 100644 index 0000000..f73de92 Binary files /dev/null and b/dev/docs/html/_static/hymod12.png differ diff --git a/dev/docs/html/_static/hymod2.png b/dev/docs/html/_static/hymod2.png new file mode 100644 index 0000000..c2310d4 Binary files /dev/null and b/dev/docs/html/_static/hymod2.png differ diff --git a/dev/docs/html/_static/hymod3.png b/dev/docs/html/_static/hymod3.png new file mode 100644 index 0000000..0411f21 Binary files /dev/null and b/dev/docs/html/_static/hymod3.png differ diff --git a/dev/docs/html/_static/hymod4.png b/dev/docs/html/_static/hymod4.png new file mode 100644 index 0000000..d1d2b4b Binary files /dev/null and b/dev/docs/html/_static/hymod4.png differ diff --git a/dev/docs/html/_static/hymod5.png b/dev/docs/html/_static/hymod5.png new file mode 100644 
index 0000000..a58dc6b Binary files /dev/null and b/dev/docs/html/_static/hymod5.png differ diff --git a/dev/docs/html/_static/hymod6.png b/dev/docs/html/_static/hymod6.png new file mode 100644 index 0000000..0a0db28 Binary files /dev/null and b/dev/docs/html/_static/hymod6.png differ diff --git a/dev/docs/html/_static/hymod7.png b/dev/docs/html/_static/hymod7.png new file mode 100644 index 0000000..e8f6779 Binary files /dev/null and b/dev/docs/html/_static/hymod7.png differ diff --git a/dev/docs/html/_static/hymod8.png b/dev/docs/html/_static/hymod8.png new file mode 100644 index 0000000..dd3d625 Binary files /dev/null and b/dev/docs/html/_static/hymod8.png differ diff --git a/dev/docs/html/_static/hymod9.png b/dev/docs/html/_static/hymod9.png new file mode 100644 index 0000000..e5f56d0 Binary files /dev/null and b/dev/docs/html/_static/hymod9.png differ diff --git a/dev/docs/html/_static/hymod_schematic-DAVE.png b/dev/docs/html/_static/hymod_schematic-DAVE.png new file mode 100644 index 0000000..83406b4 Binary files /dev/null and b/dev/docs/html/_static/hymod_schematic-DAVE.png differ diff --git a/dev/docs/html/_static/im3.png b/dev/docs/html/_static/im3.png new file mode 100644 index 0000000..de17482 Binary files /dev/null and b/dev/docs/html/_static/im3.png differ diff --git a/dev/docs/html/_static/images/logo_binder.svg b/dev/docs/html/_static/images/logo_binder.svg new file mode 100644 index 0000000..45fecf7 --- /dev/null +++ b/dev/docs/html/_static/images/logo_binder.svg @@ -0,0 +1,19 @@ + + + + +logo + + + + + + + + diff --git a/dev/docs/html/_static/images/logo_colab.png b/dev/docs/html/_static/images/logo_colab.png new file mode 100644 index 0000000..b7560ec Binary files /dev/null and b/dev/docs/html/_static/images/logo_colab.png differ diff --git a/dev/docs/html/_static/images/logo_deepnote.svg b/dev/docs/html/_static/images/logo_deepnote.svg new file mode 100644 index 0000000..fa77ebf --- /dev/null +++ b/dev/docs/html/_static/images/logo_deepnote.svg @@ -0,0 +1 @@ + diff --git a/dev/docs/html/_static/images/logo_jupyterhub.svg b/dev/docs/html/_static/images/logo_jupyterhub.svg new file mode 100644 index 0000000..60cfe9f --- /dev/null +++ b/dev/docs/html/_static/images/logo_jupyterhub.svg @@ -0,0 +1 @@ +logo_jupyterhubHub diff --git a/dev/docs/html/_static/language_data.js b/dev/docs/html/_static/language_data.js new file mode 100644 index 0000000..367b8ed --- /dev/null +++ b/dev/docs/html/_static/language_data.js @@ -0,0 +1,199 @@ +/* + * language_data.js + * ~~~~~~~~~~~~~~~~ + * + * This script contains the language-specific data used by searchtools.js, + * namely the list of stopwords, stemmer, scorer and splitter. + * + * :copyright: Copyright 2007-2024 by the Sphinx team, see AUTHORS. + * :license: BSD, see LICENSE for details. 
+ * + */ + +var stopwords = ["a", "and", "are", "as", "at", "be", "but", "by", "for", "if", "in", "into", "is", "it", "near", "no", "not", "of", "on", "or", "such", "that", "the", "their", "then", "there", "these", "they", "this", "to", "was", "will", "with"]; + + +/* Non-minified version is copied as a separate JS file, if available */ + +/** + * Porter Stemmer + */ +var Stemmer = function() { + + var step2list = { + ational: 'ate', + tional: 'tion', + enci: 'ence', + anci: 'ance', + izer: 'ize', + bli: 'ble', + alli: 'al', + entli: 'ent', + eli: 'e', + ousli: 'ous', + ization: 'ize', + ation: 'ate', + ator: 'ate', + alism: 'al', + iveness: 'ive', + fulness: 'ful', + ousness: 'ous', + aliti: 'al', + iviti: 'ive', + biliti: 'ble', + logi: 'log' + }; + + var step3list = { + icate: 'ic', + ative: '', + alize: 'al', + iciti: 'ic', + ical: 'ic', + ful: '', + ness: '' + }; + + var c = "[^aeiou]"; // consonant + var v = "[aeiouy]"; // vowel + var C = c + "[^aeiouy]*"; // consonant sequence + var V = v + "[aeiou]*"; // vowel sequence + + var mgr0 = "^(" + C + ")?" + V + C; // [C]VC... is m>0 + var meq1 = "^(" + C + ")?" + V + C + "(" + V + ")?$"; // [C]VC[V] is m=1 + var mgr1 = "^(" + C + ")?" + V + C + V + C; // [C]VCVC... is m>1 + var s_v = "^(" + C + ")?" + v; // vowel in stem + + this.stemWord = function (w) { + var stem; + var suffix; + var firstch; + var origword = w; + + if (w.length < 3) + return w; + + var re; + var re2; + var re3; + var re4; + + firstch = w.substr(0,1); + if (firstch == "y") + w = firstch.toUpperCase() + w.substr(1); + + // Step 1a + re = /^(.+?)(ss|i)es$/; + re2 = /^(.+?)([^s])s$/; + + if (re.test(w)) + w = w.replace(re,"$1$2"); + else if (re2.test(w)) + w = w.replace(re2,"$1$2"); + + // Step 1b + re = /^(.+?)eed$/; + re2 = /^(.+?)(ed|ing)$/; + if (re.test(w)) { + var fp = re.exec(w); + re = new RegExp(mgr0); + if (re.test(fp[1])) { + re = /.$/; + w = w.replace(re,""); + } + } + else if (re2.test(w)) { + var fp = re2.exec(w); + stem = fp[1]; + re2 = new RegExp(s_v); + if (re2.test(stem)) { + w = stem; + re2 = /(at|bl|iz)$/; + re3 = new RegExp("([^aeiouylsz])\\1$"); + re4 = new RegExp("^" + C + v + "[^aeiouwxy]$"); + if (re2.test(w)) + w = w + "e"; + else if (re3.test(w)) { + re = /.$/; + w = w.replace(re,""); + } + else if (re4.test(w)) + w = w + "e"; + } + } + + // Step 1c + re = /^(.+?)y$/; + if (re.test(w)) { + var fp = re.exec(w); + stem = fp[1]; + re = new RegExp(s_v); + if (re.test(stem)) + w = stem + "i"; + } + + // Step 2 + re = /^(.+?)(ational|tional|enci|anci|izer|bli|alli|entli|eli|ousli|ization|ation|ator|alism|iveness|fulness|ousness|aliti|iviti|biliti|logi)$/; + if (re.test(w)) { + var fp = re.exec(w); + stem = fp[1]; + suffix = fp[2]; + re = new RegExp(mgr0); + if (re.test(stem)) + w = stem + step2list[suffix]; + } + + // Step 3 + re = /^(.+?)(icate|ative|alize|iciti|ical|ful|ness)$/; + if (re.test(w)) { + var fp = re.exec(w); + stem = fp[1]; + suffix = fp[2]; + re = new RegExp(mgr0); + if (re.test(stem)) + w = stem + step3list[suffix]; + } + + // Step 4 + re = /^(.+?)(al|ance|ence|er|ic|able|ible|ant|ement|ment|ent|ou|ism|ate|iti|ous|ive|ize)$/; + re2 = /^(.+?)(s|t)(ion)$/; + if (re.test(w)) { + var fp = re.exec(w); + stem = fp[1]; + re = new RegExp(mgr1); + if (re.test(stem)) + w = stem; + } + else if (re2.test(w)) { + var fp = re2.exec(w); + stem = fp[1] + fp[2]; + re2 = new RegExp(mgr1); + if (re2.test(stem)) + w = stem; + } + + // Step 5 + re = /^(.+?)e$/; + if (re.test(w)) { + var fp = re.exec(w); + stem = fp[1]; + re = new RegExp(mgr1); + re2 = 
new RegExp(meq1); + re3 = new RegExp("^" + C + v + "[^aeiouwxy]$"); + if (re.test(stem) || (re2.test(stem) && !(re3.test(stem)))) + w = stem; + } + re = /ll$/; + re2 = new RegExp(mgr1); + if (re.test(w) && re2.test(w)) { + re = /.$/; + w = w.replace(re,""); + } + + // and turn initial Y back to y + if (firstch == "y") + w = firstch.toLowerCase() + w.substr(1); + return w; + } +} + diff --git a/dev/docs/html/_static/locales/ar/LC_MESSAGES/booktheme.mo b/dev/docs/html/_static/locales/ar/LC_MESSAGES/booktheme.mo new file mode 100644 index 0000000..15541a6 Binary files /dev/null and b/dev/docs/html/_static/locales/ar/LC_MESSAGES/booktheme.mo differ diff --git a/dev/docs/html/_static/locales/ar/LC_MESSAGES/booktheme.po b/dev/docs/html/_static/locales/ar/LC_MESSAGES/booktheme.po new file mode 100644 index 0000000..34d404c --- /dev/null +++ b/dev/docs/html/_static/locales/ar/LC_MESSAGES/booktheme.po @@ -0,0 +1,75 @@ + +msgid "" +msgstr "" +"Project-Id-Version: Sphinx-Book-Theme\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: ar\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +msgid "Print to PDF" +msgstr "طباعة إلى PDF" + +msgid "Theme by the" +msgstr "موضوع بواسطة" + +msgid "Download source file" +msgstr "تنزيل ملف المصدر" + +msgid "open issue" +msgstr "قضية مفتوحة" + +msgid "Contents" +msgstr "محتويات" + +msgid "previous page" +msgstr "الصفحة السابقة" + +msgid "Download notebook file" +msgstr "تنزيل ملف دفتر الملاحظات" + +msgid "Copyright" +msgstr "حقوق النشر" + +msgid "Download this page" +msgstr "قم بتنزيل هذه الصفحة" + +msgid "Source repository" +msgstr "مستودع المصدر" + +msgid "By" +msgstr "بواسطة" + +msgid "repository" +msgstr "مخزن" + +msgid "Last updated on" +msgstr "آخر تحديث في" + +msgid "Toggle navigation" +msgstr "تبديل التنقل" + +msgid "Sphinx Book Theme" +msgstr "موضوع كتاب أبو الهول" + +msgid "suggest edit" +msgstr "أقترح تحرير" + +msgid "Open an issue" +msgstr "افتح قضية" + +msgid "Launch" +msgstr "إطلاق" + +msgid "Fullscreen mode" +msgstr "وضع ملء الشاشة" + +msgid "Edit this page" +msgstr "قم بتحرير هذه الصفحة" + +msgid "By the" +msgstr "بواسطة" + +msgid "next page" +msgstr "الصفحة التالية" diff --git a/dev/docs/html/_static/locales/bg/LC_MESSAGES/booktheme.mo b/dev/docs/html/_static/locales/bg/LC_MESSAGES/booktheme.mo new file mode 100644 index 0000000..da95120 Binary files /dev/null and b/dev/docs/html/_static/locales/bg/LC_MESSAGES/booktheme.mo differ diff --git a/dev/docs/html/_static/locales/bg/LC_MESSAGES/booktheme.po b/dev/docs/html/_static/locales/bg/LC_MESSAGES/booktheme.po new file mode 100644 index 0000000..7420c19 --- /dev/null +++ b/dev/docs/html/_static/locales/bg/LC_MESSAGES/booktheme.po @@ -0,0 +1,75 @@ + +msgid "" +msgstr "" +"Project-Id-Version: Sphinx-Book-Theme\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: bg\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +msgid "Print to PDF" +msgstr "Печат в PDF" + +msgid "Theme by the" +msgstr "Тема от" + +msgid "Download source file" +msgstr "Изтеглете изходния файл" + +msgid "open issue" +msgstr "отворен брой" + +msgid "Contents" +msgstr "Съдържание" + +msgid "previous page" +msgstr "предишна страница" + +msgid "Download notebook file" +msgstr "Изтеглете файла на бележника" + +msgid "Copyright" +msgstr "Авторско право" + +msgid "Download this page" +msgstr "Изтеглете тази страница" + +msgid "Source repository" +msgstr "Хранилище на източника" + +msgid "By" +msgstr 
"От" + +msgid "repository" +msgstr "хранилище" + +msgid "Last updated on" +msgstr "Последна актуализация на" + +msgid "Toggle navigation" +msgstr "Превключване на навигацията" + +msgid "Sphinx Book Theme" +msgstr "Тема на книгата Sphinx" + +msgid "suggest edit" +msgstr "предложи редактиране" + +msgid "Open an issue" +msgstr "Отворете проблем" + +msgid "Launch" +msgstr "Стартиране" + +msgid "Fullscreen mode" +msgstr "Режим на цял екран" + +msgid "Edit this page" +msgstr "Редактирайте тази страница" + +msgid "By the" +msgstr "По" + +msgid "next page" +msgstr "Следваща страница" diff --git a/dev/docs/html/_static/locales/bn/LC_MESSAGES/booktheme.mo b/dev/docs/html/_static/locales/bn/LC_MESSAGES/booktheme.mo new file mode 100644 index 0000000..6b96639 Binary files /dev/null and b/dev/docs/html/_static/locales/bn/LC_MESSAGES/booktheme.mo differ diff --git a/dev/docs/html/_static/locales/bn/LC_MESSAGES/booktheme.po b/dev/docs/html/_static/locales/bn/LC_MESSAGES/booktheme.po new file mode 100644 index 0000000..63a07c3 --- /dev/null +++ b/dev/docs/html/_static/locales/bn/LC_MESSAGES/booktheme.po @@ -0,0 +1,63 @@ + +msgid "" +msgstr "" +"Project-Id-Version: Sphinx-Book-Theme\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: bn\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +msgid "Print to PDF" +msgstr "পিডিএফ প্রিন্ট করুন" + +msgid "Theme by the" +msgstr "থিম দ্বারা" + +msgid "Download source file" +msgstr "উত্স ফাইল ডাউনলোড করুন" + +msgid "open issue" +msgstr "খোলা সমস্যা" + +msgid "previous page" +msgstr "আগের পৃষ্ঠা" + +msgid "Download notebook file" +msgstr "নোটবুক ফাইল ডাউনলোড করুন" + +msgid "Copyright" +msgstr "কপিরাইট" + +msgid "Download this page" +msgstr "এই পৃষ্ঠাটি ডাউনলোড করুন" + +msgid "Source repository" +msgstr "উত্স সংগ্রহস্থল" + +msgid "By" +msgstr "দ্বারা" + +msgid "Last updated on" +msgstr "সর্বশেষ আপডেট" + +msgid "Toggle navigation" +msgstr "নেভিগেশন টগল করুন" + +msgid "Sphinx Book Theme" +msgstr "স্পিনিক্স বুক থিম" + +msgid "Open an issue" +msgstr "একটি সমস্যা খুলুন" + +msgid "Launch" +msgstr "শুরু করা" + +msgid "Edit this page" +msgstr "এই পৃষ্ঠাটি সম্পাদনা করুন" + +msgid "By the" +msgstr "দ্বারা" + +msgid "next page" +msgstr "পরবর্তী পৃষ্ঠা" diff --git a/dev/docs/html/_static/locales/ca/LC_MESSAGES/booktheme.mo b/dev/docs/html/_static/locales/ca/LC_MESSAGES/booktheme.mo new file mode 100644 index 0000000..a4dd30e Binary files /dev/null and b/dev/docs/html/_static/locales/ca/LC_MESSAGES/booktheme.mo differ diff --git a/dev/docs/html/_static/locales/ca/LC_MESSAGES/booktheme.po b/dev/docs/html/_static/locales/ca/LC_MESSAGES/booktheme.po new file mode 100644 index 0000000..8fb358b --- /dev/null +++ b/dev/docs/html/_static/locales/ca/LC_MESSAGES/booktheme.po @@ -0,0 +1,66 @@ + +msgid "" +msgstr "" +"Project-Id-Version: Sphinx-Book-Theme\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: ca\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +msgid "Print to PDF" +msgstr "Imprimeix a PDF" + +msgid "Theme by the" +msgstr "Tema del" + +msgid "Download source file" +msgstr "Baixeu el fitxer font" + +msgid "open issue" +msgstr "número obert" + +msgid "previous page" +msgstr "Pàgina anterior" + +msgid "Download notebook file" +msgstr "Descarregar fitxer de quadern" + +msgid "Copyright" +msgstr "Copyright" + +msgid "Download this page" +msgstr "Descarregueu aquesta pàgina" + +msgid "Source repository" +msgstr "Dipòsit de fonts" + +msgid "By" 
+msgstr "Per" + +msgid "Last updated on" +msgstr "Darrera actualització el" + +msgid "Toggle navigation" +msgstr "Commuta la navegació" + +msgid "Sphinx Book Theme" +msgstr "Tema del llibre Esfinx" + +msgid "suggest edit" +msgstr "suggerir edició" + +msgid "Open an issue" +msgstr "Obriu un número" + +msgid "Launch" +msgstr "Llançament" + +msgid "Edit this page" +msgstr "Editeu aquesta pàgina" + +msgid "By the" +msgstr "Per la" + +msgid "next page" +msgstr "pàgina següent" diff --git a/dev/docs/html/_static/locales/cs/LC_MESSAGES/booktheme.mo b/dev/docs/html/_static/locales/cs/LC_MESSAGES/booktheme.mo new file mode 100644 index 0000000..c39e01a Binary files /dev/null and b/dev/docs/html/_static/locales/cs/LC_MESSAGES/booktheme.mo differ diff --git a/dev/docs/html/_static/locales/cs/LC_MESSAGES/booktheme.po b/dev/docs/html/_static/locales/cs/LC_MESSAGES/booktheme.po new file mode 100644 index 0000000..c6ef469 --- /dev/null +++ b/dev/docs/html/_static/locales/cs/LC_MESSAGES/booktheme.po @@ -0,0 +1,75 @@ + +msgid "" +msgstr "" +"Project-Id-Version: Sphinx-Book-Theme\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: cs\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +msgid "Print to PDF" +msgstr "Tisk do PDF" + +msgid "Theme by the" +msgstr "Téma od" + +msgid "Download source file" +msgstr "Stáhněte si zdrojový soubor" + +msgid "open issue" +msgstr "otevřené číslo" + +msgid "Contents" +msgstr "Obsah" + +msgid "previous page" +msgstr "předchozí stránka" + +msgid "Download notebook file" +msgstr "Stáhnout soubor poznámkového bloku" + +msgid "Copyright" +msgstr "autorská práva" + +msgid "Download this page" +msgstr "Stáhněte si tuto stránku" + +msgid "Source repository" +msgstr "Zdrojové úložiště" + +msgid "By" +msgstr "Podle" + +msgid "repository" +msgstr "úložiště" + +msgid "Last updated on" +msgstr "Naposledy aktualizováno" + +msgid "Toggle navigation" +msgstr "Přepnout navigaci" + +msgid "Sphinx Book Theme" +msgstr "Téma knihy Sfinga" + +msgid "suggest edit" +msgstr "navrhnout úpravy" + +msgid "Open an issue" +msgstr "Otevřete problém" + +msgid "Launch" +msgstr "Zahájení" + +msgid "Fullscreen mode" +msgstr "Režim celé obrazovky" + +msgid "Edit this page" +msgstr "Upravit tuto stránku" + +msgid "By the" +msgstr "Podle" + +msgid "next page" +msgstr "další strana" diff --git a/dev/docs/html/_static/locales/da/LC_MESSAGES/booktheme.mo b/dev/docs/html/_static/locales/da/LC_MESSAGES/booktheme.mo new file mode 100644 index 0000000..f43157d Binary files /dev/null and b/dev/docs/html/_static/locales/da/LC_MESSAGES/booktheme.mo differ diff --git a/dev/docs/html/_static/locales/da/LC_MESSAGES/booktheme.po b/dev/docs/html/_static/locales/da/LC_MESSAGES/booktheme.po new file mode 100644 index 0000000..306a38e --- /dev/null +++ b/dev/docs/html/_static/locales/da/LC_MESSAGES/booktheme.po @@ -0,0 +1,75 @@ + +msgid "" +msgstr "" +"Project-Id-Version: Sphinx-Book-Theme\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: da\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +msgid "Print to PDF" +msgstr "Udskriv til PDF" + +msgid "Theme by the" +msgstr "Tema af" + +msgid "Download source file" +msgstr "Download kildefil" + +msgid "open issue" +msgstr "åbent nummer" + +msgid "Contents" +msgstr "Indhold" + +msgid "previous page" +msgstr "forrige side" + +msgid "Download notebook file" +msgstr "Download notesbog-fil" + +msgid "Copyright" +msgstr "ophavsret" + +msgid 
"Download this page" +msgstr "Download denne side" + +msgid "Source repository" +msgstr "Kildelager" + +msgid "By" +msgstr "Ved" + +msgid "repository" +msgstr "lager" + +msgid "Last updated on" +msgstr "Sidst opdateret den" + +msgid "Toggle navigation" +msgstr "Skift navigation" + +msgid "Sphinx Book Theme" +msgstr "Sphinx bogtema" + +msgid "suggest edit" +msgstr "foreslå redigering" + +msgid "Open an issue" +msgstr "Åbn et problem" + +msgid "Launch" +msgstr "Start" + +msgid "Fullscreen mode" +msgstr "Fuldskærmstilstand" + +msgid "Edit this page" +msgstr "Rediger denne side" + +msgid "By the" +msgstr "Ved" + +msgid "next page" +msgstr "Næste side" diff --git a/dev/docs/html/_static/locales/de/LC_MESSAGES/booktheme.mo b/dev/docs/html/_static/locales/de/LC_MESSAGES/booktheme.mo new file mode 100644 index 0000000..648b565 Binary files /dev/null and b/dev/docs/html/_static/locales/de/LC_MESSAGES/booktheme.mo differ diff --git a/dev/docs/html/_static/locales/de/LC_MESSAGES/booktheme.po b/dev/docs/html/_static/locales/de/LC_MESSAGES/booktheme.po new file mode 100644 index 0000000..4925360 --- /dev/null +++ b/dev/docs/html/_static/locales/de/LC_MESSAGES/booktheme.po @@ -0,0 +1,75 @@ + +msgid "" +msgstr "" +"Project-Id-Version: Sphinx-Book-Theme\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: de\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +msgid "Print to PDF" +msgstr "In PDF drucken" + +msgid "Theme by the" +msgstr "Thema von der" + +msgid "Download source file" +msgstr "Quelldatei herunterladen" + +msgid "open issue" +msgstr "offenes Thema" + +msgid "Contents" +msgstr "Inhalt" + +msgid "previous page" +msgstr "vorherige Seite" + +msgid "Download notebook file" +msgstr "Notebook-Datei herunterladen" + +msgid "Copyright" +msgstr "Urheberrechte ©" + +msgid "Download this page" +msgstr "Laden Sie diese Seite herunter" + +msgid "Source repository" +msgstr "Quell-Repository" + +msgid "By" +msgstr "Durch" + +msgid "repository" +msgstr "Repository" + +msgid "Last updated on" +msgstr "Zuletzt aktualisiert am" + +msgid "Toggle navigation" +msgstr "Navigation umschalten" + +msgid "Sphinx Book Theme" +msgstr "Sphinx-Buch-Thema" + +msgid "suggest edit" +msgstr "vorschlagen zu bearbeiten" + +msgid "Open an issue" +msgstr "Öffnen Sie ein Problem" + +msgid "Launch" +msgstr "Starten" + +msgid "Fullscreen mode" +msgstr "Vollbildmodus" + +msgid "Edit this page" +msgstr "Bearbeite diese Seite" + +msgid "By the" +msgstr "Bis zum" + +msgid "next page" +msgstr "Nächste Seite" diff --git a/dev/docs/html/_static/locales/el/LC_MESSAGES/booktheme.mo b/dev/docs/html/_static/locales/el/LC_MESSAGES/booktheme.mo new file mode 100644 index 0000000..fca6e93 Binary files /dev/null and b/dev/docs/html/_static/locales/el/LC_MESSAGES/booktheme.mo differ diff --git a/dev/docs/html/_static/locales/el/LC_MESSAGES/booktheme.po b/dev/docs/html/_static/locales/el/LC_MESSAGES/booktheme.po new file mode 100644 index 0000000..3e01acb --- /dev/null +++ b/dev/docs/html/_static/locales/el/LC_MESSAGES/booktheme.po @@ -0,0 +1,75 @@ + +msgid "" +msgstr "" +"Project-Id-Version: Sphinx-Book-Theme\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: el\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +msgid "Print to PDF" +msgstr "Εκτύπωση σε PDF" + +msgid "Theme by the" +msgstr "Θέμα από το" + +msgid "Download source file" +msgstr "Λήψη αρχείου προέλευσης" + +msgid "open issue" +msgstr "ανοιχτό 
ζήτημα" + +msgid "Contents" +msgstr "Περιεχόμενα" + +msgid "previous page" +msgstr "προηγούμενη σελίδα" + +msgid "Download notebook file" +msgstr "Λήψη αρχείου σημειωματάριου" + +msgid "Copyright" +msgstr "Πνευματική ιδιοκτησία" + +msgid "Download this page" +msgstr "Λήψη αυτής της σελίδας" + +msgid "Source repository" +msgstr "Αποθήκη πηγής" + +msgid "By" +msgstr "Με" + +msgid "repository" +msgstr "αποθήκη" + +msgid "Last updated on" +msgstr "Τελευταία ενημέρωση στις" + +msgid "Toggle navigation" +msgstr "Εναλλαγή πλοήγησης" + +msgid "Sphinx Book Theme" +msgstr "Θέμα βιβλίου Sphinx" + +msgid "suggest edit" +msgstr "προτείνω επεξεργασία" + +msgid "Open an issue" +msgstr "Ανοίξτε ένα ζήτημα" + +msgid "Launch" +msgstr "Εκτόξευση" + +msgid "Fullscreen mode" +msgstr "ΛΕΙΤΟΥΡΓΙΑ ΠΛΗΡΟΥΣ ΟΘΟΝΗΣ" + +msgid "Edit this page" +msgstr "Επεξεργαστείτε αυτήν τη σελίδα" + +msgid "By the" +msgstr "Από το" + +msgid "next page" +msgstr "επόμενη σελίδα" diff --git a/dev/docs/html/_static/locales/eo/LC_MESSAGES/booktheme.mo b/dev/docs/html/_static/locales/eo/LC_MESSAGES/booktheme.mo new file mode 100644 index 0000000..d1072bb Binary files /dev/null and b/dev/docs/html/_static/locales/eo/LC_MESSAGES/booktheme.mo differ diff --git a/dev/docs/html/_static/locales/eo/LC_MESSAGES/booktheme.po b/dev/docs/html/_static/locales/eo/LC_MESSAGES/booktheme.po new file mode 100644 index 0000000..f7ed226 --- /dev/null +++ b/dev/docs/html/_static/locales/eo/LC_MESSAGES/booktheme.po @@ -0,0 +1,75 @@ + +msgid "" +msgstr "" +"Project-Id-Version: Sphinx-Book-Theme\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: eo\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +msgid "Print to PDF" +msgstr "Presi al PDF" + +msgid "Theme by the" +msgstr "Temo de la" + +msgid "Download source file" +msgstr "Elŝutu fontodosieron" + +msgid "open issue" +msgstr "malferma numero" + +msgid "Contents" +msgstr "Enhavo" + +msgid "previous page" +msgstr "antaŭa paĝo" + +msgid "Download notebook file" +msgstr "Elŝutu kajeran dosieron" + +msgid "Copyright" +msgstr "Kopirajto" + +msgid "Download this page" +msgstr "Elŝutu ĉi tiun paĝon" + +msgid "Source repository" +msgstr "Fonto-deponejo" + +msgid "By" +msgstr "De" + +msgid "repository" +msgstr "deponejo" + +msgid "Last updated on" +msgstr "Laste ĝisdatigita la" + +msgid "Toggle navigation" +msgstr "Ŝalti navigadon" + +msgid "Sphinx Book Theme" +msgstr "Sfinksa Libro-Temo" + +msgid "suggest edit" +msgstr "sugesti redaktadon" + +msgid "Open an issue" +msgstr "Malfermu numeron" + +msgid "Launch" +msgstr "Lanĉo" + +msgid "Fullscreen mode" +msgstr "Plenekrana reĝimo" + +msgid "Edit this page" +msgstr "Redaktu ĉi tiun paĝon" + +msgid "By the" +msgstr "Per la" + +msgid "next page" +msgstr "sekva paĝo" diff --git a/dev/docs/html/_static/locales/es/LC_MESSAGES/booktheme.mo b/dev/docs/html/_static/locales/es/LC_MESSAGES/booktheme.mo new file mode 100644 index 0000000..ba2ee4d Binary files /dev/null and b/dev/docs/html/_static/locales/es/LC_MESSAGES/booktheme.mo differ diff --git a/dev/docs/html/_static/locales/es/LC_MESSAGES/booktheme.po b/dev/docs/html/_static/locales/es/LC_MESSAGES/booktheme.po new file mode 100644 index 0000000..5e0029e --- /dev/null +++ b/dev/docs/html/_static/locales/es/LC_MESSAGES/booktheme.po @@ -0,0 +1,75 @@ + +msgid "" +msgstr "" +"Project-Id-Version: Sphinx-Book-Theme\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: es\n" +"Plural-Forms: 
nplurals=2; plural=(n != 1);\n" + +msgid "Print to PDF" +msgstr "Imprimir en PDF" + +msgid "Theme by the" +msgstr "Tema por el" + +msgid "Download source file" +msgstr "Descargar archivo fuente" + +msgid "open issue" +msgstr "Tema abierto" + +msgid "Contents" +msgstr "Contenido" + +msgid "previous page" +msgstr "pagina anterior" + +msgid "Download notebook file" +msgstr "Descargar archivo de cuaderno" + +msgid "Copyright" +msgstr "Derechos de autor" + +msgid "Download this page" +msgstr "Descarga esta pagina" + +msgid "Source repository" +msgstr "Repositorio de origen" + +msgid "By" +msgstr "Por" + +msgid "repository" +msgstr "repositorio" + +msgid "Last updated on" +msgstr "Ultima actualización en" + +msgid "Toggle navigation" +msgstr "Navegación de palanca" + +msgid "Sphinx Book Theme" +msgstr "Tema del libro de la esfinge" + +msgid "suggest edit" +msgstr "sugerir editar" + +msgid "Open an issue" +msgstr "Abrir un problema" + +msgid "Launch" +msgstr "Lanzamiento" + +msgid "Fullscreen mode" +msgstr "Modo de pantalla completa" + +msgid "Edit this page" +msgstr "Edita esta página" + +msgid "By the" +msgstr "Por el" + +msgid "next page" +msgstr "siguiente página" diff --git a/dev/docs/html/_static/locales/et/LC_MESSAGES/booktheme.mo b/dev/docs/html/_static/locales/et/LC_MESSAGES/booktheme.mo new file mode 100644 index 0000000..983b823 Binary files /dev/null and b/dev/docs/html/_static/locales/et/LC_MESSAGES/booktheme.mo differ diff --git a/dev/docs/html/_static/locales/et/LC_MESSAGES/booktheme.po b/dev/docs/html/_static/locales/et/LC_MESSAGES/booktheme.po new file mode 100644 index 0000000..8680982 --- /dev/null +++ b/dev/docs/html/_static/locales/et/LC_MESSAGES/booktheme.po @@ -0,0 +1,75 @@ + +msgid "" +msgstr "" +"Project-Id-Version: Sphinx-Book-Theme\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: et\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +msgid "Print to PDF" +msgstr "Prindi PDF-i" + +msgid "Theme by the" +msgstr "Teema" + +msgid "Download source file" +msgstr "Laadige alla lähtefail" + +msgid "open issue" +msgstr "avatud küsimus" + +msgid "Contents" +msgstr "Sisu" + +msgid "previous page" +msgstr "eelmine leht" + +msgid "Download notebook file" +msgstr "Laadige sülearvuti fail alla" + +msgid "Copyright" +msgstr "Autoriõigus" + +msgid "Download this page" +msgstr "Laadige see leht alla" + +msgid "Source repository" +msgstr "Allikahoidla" + +msgid "By" +msgstr "Kõrval" + +msgid "repository" +msgstr "hoidla" + +msgid "Last updated on" +msgstr "Viimati uuendatud" + +msgid "Toggle navigation" +msgstr "Lülita navigeerimine sisse" + +msgid "Sphinx Book Theme" +msgstr "Sfinksiraamatu teema" + +msgid "suggest edit" +msgstr "soovita muuta" + +msgid "Open an issue" +msgstr "Avage probleem" + +msgid "Launch" +msgstr "Käivitage" + +msgid "Fullscreen mode" +msgstr "Täisekraanirežiim" + +msgid "Edit this page" +msgstr "Muutke seda lehte" + +msgid "By the" +msgstr "Autor" + +msgid "next page" +msgstr "järgmine leht" diff --git a/dev/docs/html/_static/locales/fi/LC_MESSAGES/booktheme.mo b/dev/docs/html/_static/locales/fi/LC_MESSAGES/booktheme.mo new file mode 100644 index 0000000..d8ac054 Binary files /dev/null and b/dev/docs/html/_static/locales/fi/LC_MESSAGES/booktheme.mo differ diff --git a/dev/docs/html/_static/locales/fi/LC_MESSAGES/booktheme.po b/dev/docs/html/_static/locales/fi/LC_MESSAGES/booktheme.po new file mode 100644 index 0000000..34dac21 --- /dev/null +++ 
b/dev/docs/html/_static/locales/fi/LC_MESSAGES/booktheme.po @@ -0,0 +1,75 @@ + +msgid "" +msgstr "" +"Project-Id-Version: Sphinx-Book-Theme\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: fi\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +msgid "Print to PDF" +msgstr "Tulosta PDF-tiedostoon" + +msgid "Theme by the" +msgstr "Teeman tekijä" + +msgid "Download source file" +msgstr "Lataa lähdetiedosto" + +msgid "open issue" +msgstr "avoin ongelma" + +msgid "Contents" +msgstr "Sisällys" + +msgid "previous page" +msgstr "Edellinen sivu" + +msgid "Download notebook file" +msgstr "Lataa muistikirjatiedosto" + +msgid "Copyright" +msgstr "Tekijänoikeus" + +msgid "Download this page" +msgstr "Lataa tämä sivu" + +msgid "Source repository" +msgstr "Lähteen arkisto" + +msgid "By" +msgstr "Tekijä" + +msgid "repository" +msgstr "arkisto" + +msgid "Last updated on" +msgstr "Viimeksi päivitetty" + +msgid "Toggle navigation" +msgstr "Vaihda navigointia" + +msgid "Sphinx Book Theme" +msgstr "Sphinx-kirjan teema" + +msgid "suggest edit" +msgstr "ehdottaa muokkausta" + +msgid "Open an issue" +msgstr "Avaa ongelma" + +msgid "Launch" +msgstr "Tuoda markkinoille" + +msgid "Fullscreen mode" +msgstr "Koko näytön tila" + +msgid "Edit this page" +msgstr "Muokkaa tätä sivua" + +msgid "By the" +msgstr "Mukaan" + +msgid "next page" +msgstr "seuraava sivu" diff --git a/dev/docs/html/_static/locales/fr/LC_MESSAGES/booktheme.mo b/dev/docs/html/_static/locales/fr/LC_MESSAGES/booktheme.mo new file mode 100644 index 0000000..f663d39 Binary files /dev/null and b/dev/docs/html/_static/locales/fr/LC_MESSAGES/booktheme.mo differ diff --git a/dev/docs/html/_static/locales/fr/LC_MESSAGES/booktheme.po b/dev/docs/html/_static/locales/fr/LC_MESSAGES/booktheme.po new file mode 100644 index 0000000..8991a1b --- /dev/null +++ b/dev/docs/html/_static/locales/fr/LC_MESSAGES/booktheme.po @@ -0,0 +1,75 @@ + +msgid "" +msgstr "" +"Project-Id-Version: Sphinx-Book-Theme\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: fr\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +msgid "Print to PDF" +msgstr "Imprimer au format PDF" + +msgid "Theme by the" +msgstr "Thème par le" + +msgid "Download source file" +msgstr "Télécharger le fichier source" + +msgid "open issue" +msgstr "signaler un problème" + +msgid "Contents" +msgstr "Contenu" + +msgid "previous page" +msgstr "page précédente" + +msgid "Download notebook file" +msgstr "Télécharger le fichier notebook" + +msgid "Copyright" +msgstr "droits d'auteur" + +msgid "Download this page" +msgstr "Téléchargez cette page" + +msgid "Source repository" +msgstr "Dépôt source" + +msgid "By" +msgstr "Par" + +msgid "repository" +msgstr "dépôt" + +msgid "Last updated on" +msgstr "Dernière mise à jour le" + +msgid "Toggle navigation" +msgstr "Basculer la navigation" + +msgid "Sphinx Book Theme" +msgstr "Thème du livre Sphinx" + +msgid "suggest edit" +msgstr "suggestion de modification" + +msgid "Open an issue" +msgstr "Ouvrez un problème" + +msgid "Launch" +msgstr "lancement" + +msgid "Fullscreen mode" +msgstr "Mode plein écran" + +msgid "Edit this page" +msgstr "Modifier cette page" + +msgid "By the" +msgstr "Par le" + +msgid "next page" +msgstr "page suivante" diff --git a/dev/docs/html/_static/locales/hr/LC_MESSAGES/booktheme.mo b/dev/docs/html/_static/locales/hr/LC_MESSAGES/booktheme.mo new file mode 100644 index 0000000..eca4a1a Binary files /dev/null and 
b/dev/docs/html/_static/locales/hr/LC_MESSAGES/booktheme.mo differ diff --git a/dev/docs/html/_static/locales/hr/LC_MESSAGES/booktheme.po b/dev/docs/html/_static/locales/hr/LC_MESSAGES/booktheme.po new file mode 100644 index 0000000..42c4233 --- /dev/null +++ b/dev/docs/html/_static/locales/hr/LC_MESSAGES/booktheme.po @@ -0,0 +1,75 @@ + +msgid "" +msgstr "" +"Project-Id-Version: Sphinx-Book-Theme\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: hr\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +msgid "Print to PDF" +msgstr "Ispis u PDF" + +msgid "Theme by the" +msgstr "Tema autora" + +msgid "Download source file" +msgstr "Preuzmi izvornu datoteku" + +msgid "open issue" +msgstr "otvoreno izdanje" + +msgid "Contents" +msgstr "Sadržaj" + +msgid "previous page" +msgstr "Prethodna stranica" + +msgid "Download notebook file" +msgstr "Preuzmi datoteku bilježnice" + +msgid "Copyright" +msgstr "Autorska prava" + +msgid "Download this page" +msgstr "Preuzmite ovu stranicu" + +msgid "Source repository" +msgstr "Izvorno spremište" + +msgid "By" +msgstr "Po" + +msgid "repository" +msgstr "spremište" + +msgid "Last updated on" +msgstr "Posljednje ažuriranje:" + +msgid "Toggle navigation" +msgstr "Uključi / isključi navigaciju" + +msgid "Sphinx Book Theme" +msgstr "Tema knjige Sphinx" + +msgid "suggest edit" +msgstr "predloži uređivanje" + +msgid "Open an issue" +msgstr "Otvorite izdanje" + +msgid "Launch" +msgstr "Pokrenite" + +msgid "Fullscreen mode" +msgstr "Način preko cijelog zaslona" + +msgid "Edit this page" +msgstr "Uredite ovu stranicu" + +msgid "By the" +msgstr "Od strane" + +msgid "next page" +msgstr "sljedeća stranica" diff --git a/dev/docs/html/_static/locales/id/LC_MESSAGES/booktheme.mo b/dev/docs/html/_static/locales/id/LC_MESSAGES/booktheme.mo new file mode 100644 index 0000000..d07a06a Binary files /dev/null and b/dev/docs/html/_static/locales/id/LC_MESSAGES/booktheme.mo differ diff --git a/dev/docs/html/_static/locales/id/LC_MESSAGES/booktheme.po b/dev/docs/html/_static/locales/id/LC_MESSAGES/booktheme.po new file mode 100644 index 0000000..b8d8d89 --- /dev/null +++ b/dev/docs/html/_static/locales/id/LC_MESSAGES/booktheme.po @@ -0,0 +1,75 @@ + +msgid "" +msgstr "" +"Project-Id-Version: Sphinx-Book-Theme\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: id\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +msgid "Print to PDF" +msgstr "Cetak ke PDF" + +msgid "Theme by the" +msgstr "Tema oleh" + +msgid "Download source file" +msgstr "Unduh file sumber" + +msgid "open issue" +msgstr "masalah terbuka" + +msgid "Contents" +msgstr "Isi" + +msgid "previous page" +msgstr "halaman sebelumnya" + +msgid "Download notebook file" +msgstr "Unduh file notebook" + +msgid "Copyright" +msgstr "hak cipta" + +msgid "Download this page" +msgstr "Unduh halaman ini" + +msgid "Source repository" +msgstr "Repositori sumber" + +msgid "By" +msgstr "Oleh" + +msgid "repository" +msgstr "gudang" + +msgid "Last updated on" +msgstr "Terakhir diperbarui saat" + +msgid "Toggle navigation" +msgstr "Alihkan navigasi" + +msgid "Sphinx Book Theme" +msgstr "Tema Buku Sphinx" + +msgid "suggest edit" +msgstr "menyarankan edit" + +msgid "Open an issue" +msgstr "Buka masalah" + +msgid "Launch" +msgstr "Meluncurkan" + +msgid "Fullscreen mode" +msgstr "Mode layar penuh" + +msgid "Edit this page" +msgstr "Edit halaman ini" + +msgid "By the" +msgstr "Oleh" + +msgid "next page" +msgstr 
"halaman selanjutnya" diff --git a/dev/docs/html/_static/locales/it/LC_MESSAGES/booktheme.mo b/dev/docs/html/_static/locales/it/LC_MESSAGES/booktheme.mo new file mode 100644 index 0000000..53ba476 Binary files /dev/null and b/dev/docs/html/_static/locales/it/LC_MESSAGES/booktheme.mo differ diff --git a/dev/docs/html/_static/locales/it/LC_MESSAGES/booktheme.po b/dev/docs/html/_static/locales/it/LC_MESSAGES/booktheme.po new file mode 100644 index 0000000..36fca59 --- /dev/null +++ b/dev/docs/html/_static/locales/it/LC_MESSAGES/booktheme.po @@ -0,0 +1,75 @@ + +msgid "" +msgstr "" +"Project-Id-Version: Sphinx-Book-Theme\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: it\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +msgid "Print to PDF" +msgstr "Stampa in PDF" + +msgid "Theme by the" +msgstr "Tema di" + +msgid "Download source file" +msgstr "Scarica il file sorgente" + +msgid "open issue" +msgstr "questione aperta" + +msgid "Contents" +msgstr "Contenuti" + +msgid "previous page" +msgstr "pagina precedente" + +msgid "Download notebook file" +msgstr "Scarica il file del taccuino" + +msgid "Copyright" +msgstr "Diritto d'autore" + +msgid "Download this page" +msgstr "Scarica questa pagina" + +msgid "Source repository" +msgstr "Repository di origine" + +msgid "By" +msgstr "Di" + +msgid "repository" +msgstr "repository" + +msgid "Last updated on" +msgstr "Ultimo aggiornamento il" + +msgid "Toggle navigation" +msgstr "Attiva / disattiva la navigazione" + +msgid "Sphinx Book Theme" +msgstr "Tema del libro della Sfinge" + +msgid "suggest edit" +msgstr "suggerisci modifica" + +msgid "Open an issue" +msgstr "Apri un problema" + +msgid "Launch" +msgstr "Lanciare" + +msgid "Fullscreen mode" +msgstr "Modalità schermo intero" + +msgid "Edit this page" +msgstr "Modifica questa pagina" + +msgid "By the" +msgstr "Dal" + +msgid "next page" +msgstr "pagina successiva" diff --git a/dev/docs/html/_static/locales/iw/LC_MESSAGES/booktheme.mo b/dev/docs/html/_static/locales/iw/LC_MESSAGES/booktheme.mo new file mode 100644 index 0000000..a45c657 Binary files /dev/null and b/dev/docs/html/_static/locales/iw/LC_MESSAGES/booktheme.mo differ diff --git a/dev/docs/html/_static/locales/iw/LC_MESSAGES/booktheme.po b/dev/docs/html/_static/locales/iw/LC_MESSAGES/booktheme.po new file mode 100644 index 0000000..dede9cb --- /dev/null +++ b/dev/docs/html/_static/locales/iw/LC_MESSAGES/booktheme.po @@ -0,0 +1,75 @@ + +msgid "" +msgstr "" +"Project-Id-Version: Sphinx-Book-Theme\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: iw\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +msgid "Print to PDF" +msgstr "הדפס לקובץ PDF" + +msgid "Theme by the" +msgstr "נושא מאת" + +msgid "Download source file" +msgstr "הורד את קובץ המקור" + +msgid "open issue" +msgstr "בעיה פתוחה" + +msgid "Contents" +msgstr "תוכן" + +msgid "previous page" +msgstr "עמוד קודם" + +msgid "Download notebook file" +msgstr "הורד קובץ מחברת" + +msgid "Copyright" +msgstr "זכויות יוצרים" + +msgid "Download this page" +msgstr "הורד דף זה" + +msgid "Source repository" +msgstr "מאגר המקורות" + +msgid "By" +msgstr "על ידי" + +msgid "repository" +msgstr "מאגר" + +msgid "Last updated on" +msgstr "עודכן לאחרונה ב" + +msgid "Toggle navigation" +msgstr "החלף ניווט" + +msgid "Sphinx Book Theme" +msgstr "נושא ספר ספינקס" + +msgid "suggest edit" +msgstr "מציע לערוך" + +msgid "Open an issue" +msgstr "פתח גיליון" + +msgid "Launch" 
+msgstr "לְהַשִׁיק" + +msgid "Fullscreen mode" +msgstr "מצב מסך מלא" + +msgid "Edit this page" +msgstr "ערוך דף זה" + +msgid "By the" +msgstr "דרך" + +msgid "next page" +msgstr "עמוד הבא" diff --git a/dev/docs/html/_static/locales/ja/LC_MESSAGES/booktheme.mo b/dev/docs/html/_static/locales/ja/LC_MESSAGES/booktheme.mo new file mode 100644 index 0000000..1cefd29 Binary files /dev/null and b/dev/docs/html/_static/locales/ja/LC_MESSAGES/booktheme.mo differ diff --git a/dev/docs/html/_static/locales/ja/LC_MESSAGES/booktheme.po b/dev/docs/html/_static/locales/ja/LC_MESSAGES/booktheme.po new file mode 100644 index 0000000..2615f0d --- /dev/null +++ b/dev/docs/html/_static/locales/ja/LC_MESSAGES/booktheme.po @@ -0,0 +1,75 @@ + +msgid "" +msgstr "" +"Project-Id-Version: Sphinx-Book-Theme\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: ja\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +msgid "Print to PDF" +msgstr "PDFに印刷" + +msgid "Theme by the" +msgstr "のテーマ" + +msgid "Download source file" +msgstr "ソースファイルをダウンロード" + +msgid "open issue" +msgstr "未解決の問題" + +msgid "Contents" +msgstr "目次" + +msgid "previous page" +msgstr "前のページ" + +msgid "Download notebook file" +msgstr "ノートブックファイルをダウンロード" + +msgid "Copyright" +msgstr "Copyright" + +msgid "Download this page" +msgstr "このページをダウンロード" + +msgid "Source repository" +msgstr "ソースリポジトリ" + +msgid "By" +msgstr "著者" + +msgid "repository" +msgstr "リポジトリ" + +msgid "Last updated on" +msgstr "最終更新日" + +msgid "Toggle navigation" +msgstr "ナビゲーションを切り替え" + +msgid "Sphinx Book Theme" +msgstr "スフィンクスの本のテーマ" + +msgid "suggest edit" +msgstr "編集を提案する" + +msgid "Open an issue" +msgstr "問題を報告" + +msgid "Launch" +msgstr "起動" + +msgid "Fullscreen mode" +msgstr "全画面モード" + +msgid "Edit this page" +msgstr "このページを編集" + +msgid "By the" +msgstr "によって" + +msgid "next page" +msgstr "次のページ" diff --git a/dev/docs/html/_static/locales/ko/LC_MESSAGES/booktheme.mo b/dev/docs/html/_static/locales/ko/LC_MESSAGES/booktheme.mo new file mode 100644 index 0000000..06c7ec9 Binary files /dev/null and b/dev/docs/html/_static/locales/ko/LC_MESSAGES/booktheme.mo differ diff --git a/dev/docs/html/_static/locales/ko/LC_MESSAGES/booktheme.po b/dev/docs/html/_static/locales/ko/LC_MESSAGES/booktheme.po new file mode 100644 index 0000000..c9e13a4 --- /dev/null +++ b/dev/docs/html/_static/locales/ko/LC_MESSAGES/booktheme.po @@ -0,0 +1,75 @@ + +msgid "" +msgstr "" +"Project-Id-Version: Sphinx-Book-Theme\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: ko\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +msgid "Print to PDF" +msgstr "PDF로 인쇄" + +msgid "Theme by the" +msgstr "테마별" + +msgid "Download source file" +msgstr "소스 파일 다운로드" + +msgid "open issue" +msgstr "열린 문제" + +msgid "Contents" +msgstr "내용" + +msgid "previous page" +msgstr "이전 페이지" + +msgid "Download notebook file" +msgstr "노트북 파일 다운로드" + +msgid "Copyright" +msgstr "저작권" + +msgid "Download this page" +msgstr "이 페이지 다운로드" + +msgid "Source repository" +msgstr "소스 저장소" + +msgid "By" +msgstr "으로" + +msgid "repository" +msgstr "저장소" + +msgid "Last updated on" +msgstr "마지막 업데이트" + +msgid "Toggle navigation" +msgstr "탐색 전환" + +msgid "Sphinx Book Theme" +msgstr "스핑크스 도서 테마" + +msgid "suggest edit" +msgstr "편집 제안" + +msgid "Open an issue" +msgstr "이슈 열기" + +msgid "Launch" +msgstr "시작하다" + +msgid "Fullscreen mode" +msgstr "전체 화면으로보기" + +msgid "Edit this page" +msgstr "이 페이지 편집" + +msgid "By the" +msgstr "에 
의해" + +msgid "next page" +msgstr "다음 페이지" diff --git a/dev/docs/html/_static/locales/lt/LC_MESSAGES/booktheme.mo b/dev/docs/html/_static/locales/lt/LC_MESSAGES/booktheme.mo new file mode 100644 index 0000000..4468ba0 Binary files /dev/null and b/dev/docs/html/_static/locales/lt/LC_MESSAGES/booktheme.mo differ diff --git a/dev/docs/html/_static/locales/lt/LC_MESSAGES/booktheme.po b/dev/docs/html/_static/locales/lt/LC_MESSAGES/booktheme.po new file mode 100644 index 0000000..35eabd9 --- /dev/null +++ b/dev/docs/html/_static/locales/lt/LC_MESSAGES/booktheme.po @@ -0,0 +1,75 @@ + +msgid "" +msgstr "" +"Project-Id-Version: Sphinx-Book-Theme\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: lt\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +msgid "Print to PDF" +msgstr "Spausdinti į PDF" + +msgid "Theme by the" +msgstr "Tema" + +msgid "Download source file" +msgstr "Atsisiųsti šaltinio failą" + +msgid "open issue" +msgstr "atviras klausimas" + +msgid "Contents" +msgstr "Turinys" + +msgid "previous page" +msgstr "Ankstesnis puslapis" + +msgid "Download notebook file" +msgstr "Atsisiųsti nešiojamojo kompiuterio failą" + +msgid "Copyright" +msgstr "Autorių teisės" + +msgid "Download this page" +msgstr "Atsisiųskite šį puslapį" + +msgid "Source repository" +msgstr "Šaltinio saugykla" + +msgid "By" +msgstr "Iki" + +msgid "repository" +msgstr "saugykla" + +msgid "Last updated on" +msgstr "Paskutinį kartą atnaujinta" + +msgid "Toggle navigation" +msgstr "Perjungti naršymą" + +msgid "Sphinx Book Theme" +msgstr "Sfinkso knygos tema" + +msgid "suggest edit" +msgstr "pasiūlyti redaguoti" + +msgid "Open an issue" +msgstr "Atidarykite problemą" + +msgid "Launch" +msgstr "Paleiskite" + +msgid "Fullscreen mode" +msgstr "Pilno ekrano režimas" + +msgid "Edit this page" +msgstr "Redaguoti šį puslapį" + +msgid "By the" +msgstr "Prie" + +msgid "next page" +msgstr "Kitas puslapis" diff --git a/dev/docs/html/_static/locales/lv/LC_MESSAGES/booktheme.mo b/dev/docs/html/_static/locales/lv/LC_MESSAGES/booktheme.mo new file mode 100644 index 0000000..74aa4d8 Binary files /dev/null and b/dev/docs/html/_static/locales/lv/LC_MESSAGES/booktheme.mo differ diff --git a/dev/docs/html/_static/locales/lv/LC_MESSAGES/booktheme.po b/dev/docs/html/_static/locales/lv/LC_MESSAGES/booktheme.po new file mode 100644 index 0000000..ee1bd08 --- /dev/null +++ b/dev/docs/html/_static/locales/lv/LC_MESSAGES/booktheme.po @@ -0,0 +1,75 @@ + +msgid "" +msgstr "" +"Project-Id-Version: Sphinx-Book-Theme\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: lv\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +msgid "Print to PDF" +msgstr "Drukāt PDF formātā" + +msgid "Theme by the" +msgstr "Autora tēma" + +msgid "Download source file" +msgstr "Lejupielādēt avota failu" + +msgid "open issue" +msgstr "atklāts jautājums" + +msgid "Contents" +msgstr "Saturs" + +msgid "previous page" +msgstr "iepriekšējā lapa" + +msgid "Download notebook file" +msgstr "Lejupielādēt piezīmju grāmatiņu" + +msgid "Copyright" +msgstr "Autortiesības" + +msgid "Download this page" +msgstr "Lejupielādējiet šo lapu" + +msgid "Source repository" +msgstr "Avota krātuve" + +msgid "By" +msgstr "Autors" + +msgid "repository" +msgstr "krātuve" + +msgid "Last updated on" +msgstr "Pēdējoreiz atjaunināts" + +msgid "Toggle navigation" +msgstr "Pārslēgt navigāciju" + +msgid "Sphinx Book Theme" +msgstr "Sfinksa grāmatas tēma" + +msgid "suggest 
edit" +msgstr "ieteikt rediģēt" + +msgid "Open an issue" +msgstr "Atveriet problēmu" + +msgid "Launch" +msgstr "Uzsākt" + +msgid "Fullscreen mode" +msgstr "Pilnekrāna režīms" + +msgid "Edit this page" +msgstr "Rediģēt šo lapu" + +msgid "By the" +msgstr "Ar" + +msgid "next page" +msgstr "nākamā lapaspuse" diff --git a/dev/docs/html/_static/locales/ml/LC_MESSAGES/booktheme.mo b/dev/docs/html/_static/locales/ml/LC_MESSAGES/booktheme.mo new file mode 100644 index 0000000..2736e8f Binary files /dev/null and b/dev/docs/html/_static/locales/ml/LC_MESSAGES/booktheme.mo differ diff --git a/dev/docs/html/_static/locales/ml/LC_MESSAGES/booktheme.po b/dev/docs/html/_static/locales/ml/LC_MESSAGES/booktheme.po new file mode 100644 index 0000000..d471277 --- /dev/null +++ b/dev/docs/html/_static/locales/ml/LC_MESSAGES/booktheme.po @@ -0,0 +1,66 @@ + +msgid "" +msgstr "" +"Project-Id-Version: Sphinx-Book-Theme\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: ml\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +msgid "Print to PDF" +msgstr "PDF- ലേക്ക് പ്രിന്റുചെയ്യുക" + +msgid "Theme by the" +msgstr "പ്രമേയം" + +msgid "Download source file" +msgstr "ഉറവിട ഫയൽ ഡൗൺലോഡുചെയ്യുക" + +msgid "open issue" +msgstr "തുറന്ന പ്രശ്നം" + +msgid "previous page" +msgstr "മുൻപത്തെ താൾ" + +msgid "Download notebook file" +msgstr "നോട്ട്ബുക്ക് ഫയൽ ഡൺലോഡ് ചെയ്യുക" + +msgid "Copyright" +msgstr "പകർപ്പവകാശം" + +msgid "Download this page" +msgstr "ഈ പേജ് ഡൗൺലോഡുചെയ്യുക" + +msgid "Source repository" +msgstr "ഉറവിട ശേഖരം" + +msgid "By" +msgstr "എഴുതിയത്" + +msgid "Last updated on" +msgstr "അവസാനം അപ്‌ഡേറ്റുചെയ്‌തത്" + +msgid "Toggle navigation" +msgstr "നാവിഗേഷൻ ടോഗിൾ ചെയ്യുക" + +msgid "Sphinx Book Theme" +msgstr "സ്ഫിങ്ക്സ് പുസ്തക തീം" + +msgid "suggest edit" +msgstr "എഡിറ്റുചെയ്യാൻ നിർദ്ദേശിക്കുക" + +msgid "Open an issue" +msgstr "ഒരു പ്രശ്നം തുറക്കുക" + +msgid "Launch" +msgstr "സമാരംഭിക്കുക" + +msgid "Edit this page" +msgstr "ഈ പേജ് എഡിറ്റുചെയ്യുക" + +msgid "By the" +msgstr "എഴുതിയത്" + +msgid "next page" +msgstr "അടുത്ത പേജ്" diff --git a/dev/docs/html/_static/locales/mr/LC_MESSAGES/booktheme.mo b/dev/docs/html/_static/locales/mr/LC_MESSAGES/booktheme.mo new file mode 100644 index 0000000..fe53010 Binary files /dev/null and b/dev/docs/html/_static/locales/mr/LC_MESSAGES/booktheme.mo differ diff --git a/dev/docs/html/_static/locales/mr/LC_MESSAGES/booktheme.po b/dev/docs/html/_static/locales/mr/LC_MESSAGES/booktheme.po new file mode 100644 index 0000000..f3694ac --- /dev/null +++ b/dev/docs/html/_static/locales/mr/LC_MESSAGES/booktheme.po @@ -0,0 +1,66 @@ + +msgid "" +msgstr "" +"Project-Id-Version: Sphinx-Book-Theme\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: mr\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +msgid "Print to PDF" +msgstr "पीडीएफवर मुद्रित करा" + +msgid "Theme by the" +msgstr "द्वारा थीम" + +msgid "Download source file" +msgstr "स्त्रोत फाइल डाउनलोड करा" + +msgid "open issue" +msgstr "खुला मुद्दा" + +msgid "previous page" +msgstr "मागील पान" + +msgid "Download notebook file" +msgstr "नोटबुक फाईल डाउनलोड करा" + +msgid "Copyright" +msgstr "कॉपीराइट" + +msgid "Download this page" +msgstr "हे पृष्ठ डाउनलोड करा" + +msgid "Source repository" +msgstr "स्त्रोत भांडार" + +msgid "By" +msgstr "द्वारा" + +msgid "Last updated on" +msgstr "अखेरचे अद्यतनित" + +msgid "Toggle navigation" +msgstr "नेव्हिगेशन टॉगल करा" + +msgid "Sphinx Book Theme" +msgstr "स्फिंक्स बुक थीम" + 
+msgid "suggest edit" +msgstr "संपादन सुचवा" + +msgid "Open an issue" +msgstr "एक मुद्दा उघडा" + +msgid "Launch" +msgstr "लाँच करा" + +msgid "Edit this page" +msgstr "हे पृष्ठ संपादित करा" + +msgid "By the" +msgstr "द्वारा" + +msgid "next page" +msgstr "पुढील पृष्ठ" diff --git a/dev/docs/html/_static/locales/ms/LC_MESSAGES/booktheme.mo b/dev/docs/html/_static/locales/ms/LC_MESSAGES/booktheme.mo new file mode 100644 index 0000000..f02603f Binary files /dev/null and b/dev/docs/html/_static/locales/ms/LC_MESSAGES/booktheme.mo differ diff --git a/dev/docs/html/_static/locales/ms/LC_MESSAGES/booktheme.po b/dev/docs/html/_static/locales/ms/LC_MESSAGES/booktheme.po new file mode 100644 index 0000000..65b7c60 --- /dev/null +++ b/dev/docs/html/_static/locales/ms/LC_MESSAGES/booktheme.po @@ -0,0 +1,66 @@ + +msgid "" +msgstr "" +"Project-Id-Version: Sphinx-Book-Theme\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: ms\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +msgid "Print to PDF" +msgstr "Cetak ke PDF" + +msgid "Theme by the" +msgstr "Tema oleh" + +msgid "Download source file" +msgstr "Muat turun fail sumber" + +msgid "open issue" +msgstr "isu terbuka" + +msgid "previous page" +msgstr "halaman sebelumnya" + +msgid "Download notebook file" +msgstr "Muat turun fail buku nota" + +msgid "Copyright" +msgstr "hak cipta" + +msgid "Download this page" +msgstr "Muat turun halaman ini" + +msgid "Source repository" +msgstr "Repositori sumber" + +msgid "By" +msgstr "Oleh" + +msgid "Last updated on" +msgstr "Terakhir dikemas kini pada" + +msgid "Toggle navigation" +msgstr "Togol navigasi" + +msgid "Sphinx Book Theme" +msgstr "Tema Buku Sphinx" + +msgid "suggest edit" +msgstr "cadangkan edit" + +msgid "Open an issue" +msgstr "Buka masalah" + +msgid "Launch" +msgstr "Lancarkan" + +msgid "Edit this page" +msgstr "Edit halaman ini" + +msgid "By the" +msgstr "Oleh" + +msgid "next page" +msgstr "muka surat seterusnya" diff --git a/dev/docs/html/_static/locales/nl/LC_MESSAGES/booktheme.mo b/dev/docs/html/_static/locales/nl/LC_MESSAGES/booktheme.mo new file mode 100644 index 0000000..e59e7ec Binary files /dev/null and b/dev/docs/html/_static/locales/nl/LC_MESSAGES/booktheme.mo differ diff --git a/dev/docs/html/_static/locales/nl/LC_MESSAGES/booktheme.po b/dev/docs/html/_static/locales/nl/LC_MESSAGES/booktheme.po new file mode 100644 index 0000000..71bd1cd --- /dev/null +++ b/dev/docs/html/_static/locales/nl/LC_MESSAGES/booktheme.po @@ -0,0 +1,75 @@ + +msgid "" +msgstr "" +"Project-Id-Version: Sphinx-Book-Theme\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: nl\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +msgid "Print to PDF" +msgstr "Afdrukken naar pdf" + +msgid "Theme by the" +msgstr "Thema door de" + +msgid "Download source file" +msgstr "Download het bronbestand" + +msgid "open issue" +msgstr "open probleem" + +msgid "Contents" +msgstr "Inhoud" + +msgid "previous page" +msgstr "vorige pagina" + +msgid "Download notebook file" +msgstr "Download notebookbestand" + +msgid "Copyright" +msgstr "auteursrechten" + +msgid "Download this page" +msgstr "Download deze pagina" + +msgid "Source repository" +msgstr "Bronopslagplaats" + +msgid "By" +msgstr "Door" + +msgid "repository" +msgstr "repository" + +msgid "Last updated on" +msgstr "Laatst geupdate op" + +msgid "Toggle navigation" +msgstr "Schakel navigatie" + +msgid "Sphinx Book Theme" +msgstr "Sphinx-boekthema" 
+ +msgid "suggest edit" +msgstr "suggereren bewerken" + +msgid "Open an issue" +msgstr "Open een probleem" + +msgid "Launch" +msgstr "Lancering" + +msgid "Fullscreen mode" +msgstr "Volledig scherm" + +msgid "Edit this page" +msgstr "bewerk deze pagina" + +msgid "By the" +msgstr "Door de" + +msgid "next page" +msgstr "volgende bladzijde" diff --git a/dev/docs/html/_static/locales/no/LC_MESSAGES/booktheme.mo b/dev/docs/html/_static/locales/no/LC_MESSAGES/booktheme.mo new file mode 100644 index 0000000..6cd15c8 Binary files /dev/null and b/dev/docs/html/_static/locales/no/LC_MESSAGES/booktheme.mo differ diff --git a/dev/docs/html/_static/locales/no/LC_MESSAGES/booktheme.po b/dev/docs/html/_static/locales/no/LC_MESSAGES/booktheme.po new file mode 100644 index 0000000..b21346a --- /dev/null +++ b/dev/docs/html/_static/locales/no/LC_MESSAGES/booktheme.po @@ -0,0 +1,75 @@ + +msgid "" +msgstr "" +"Project-Id-Version: Sphinx-Book-Theme\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: no\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +msgid "Print to PDF" +msgstr "Skriv ut til PDF" + +msgid "Theme by the" +msgstr "Tema av" + +msgid "Download source file" +msgstr "Last ned kildefilen" + +msgid "open issue" +msgstr "åpent nummer" + +msgid "Contents" +msgstr "Innhold" + +msgid "previous page" +msgstr "forrige side" + +msgid "Download notebook file" +msgstr "Last ned notatbokfilen" + +msgid "Copyright" +msgstr "opphavsrett" + +msgid "Download this page" +msgstr "Last ned denne siden" + +msgid "Source repository" +msgstr "Kildedepot" + +msgid "By" +msgstr "Av" + +msgid "repository" +msgstr "oppbevaringssted" + +msgid "Last updated on" +msgstr "Sist oppdatert den" + +msgid "Toggle navigation" +msgstr "Bytt navigasjon" + +msgid "Sphinx Book Theme" +msgstr "Sphinx boktema" + +msgid "suggest edit" +msgstr "foreslå redigering" + +msgid "Open an issue" +msgstr "Åpne et problem" + +msgid "Launch" +msgstr "Start" + +msgid "Fullscreen mode" +msgstr "Fullskjerm-modus" + +msgid "Edit this page" +msgstr "Rediger denne siden" + +msgid "By the" +msgstr "Ved" + +msgid "next page" +msgstr "neste side" diff --git a/dev/docs/html/_static/locales/pl/LC_MESSAGES/booktheme.mo b/dev/docs/html/_static/locales/pl/LC_MESSAGES/booktheme.mo new file mode 100644 index 0000000..9ebb584 Binary files /dev/null and b/dev/docs/html/_static/locales/pl/LC_MESSAGES/booktheme.mo differ diff --git a/dev/docs/html/_static/locales/pl/LC_MESSAGES/booktheme.po b/dev/docs/html/_static/locales/pl/LC_MESSAGES/booktheme.po new file mode 100644 index 0000000..1b7233f --- /dev/null +++ b/dev/docs/html/_static/locales/pl/LC_MESSAGES/booktheme.po @@ -0,0 +1,75 @@ + +msgid "" +msgstr "" +"Project-Id-Version: Sphinx-Book-Theme\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: pl\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +msgid "Print to PDF" +msgstr "Drukuj do PDF" + +msgid "Theme by the" +msgstr "Motyw autorstwa" + +msgid "Download source file" +msgstr "Pobierz plik źródłowy" + +msgid "open issue" +msgstr "otwarty problem" + +msgid "Contents" +msgstr "Zawartość" + +msgid "previous page" +msgstr "Poprzednia strona" + +msgid "Download notebook file" +msgstr "Pobierz plik notatnika" + +msgid "Copyright" +msgstr "prawa autorskie" + +msgid "Download this page" +msgstr "Pobierz tę stronę" + +msgid "Source repository" +msgstr "Repozytorium źródłowe" + +msgid "By" +msgstr "Przez" + +msgid "repository" 
+msgstr "magazyn" + +msgid "Last updated on" +msgstr "Ostatnia aktualizacja" + +msgid "Toggle navigation" +msgstr "Przełącz nawigację" + +msgid "Sphinx Book Theme" +msgstr "Motyw książki Sphinx" + +msgid "suggest edit" +msgstr "zaproponuj edycję" + +msgid "Open an issue" +msgstr "Otwórz problem" + +msgid "Launch" +msgstr "Uruchomić" + +msgid "Fullscreen mode" +msgstr "Pełny ekran" + +msgid "Edit this page" +msgstr "Edytuj tę strone" + +msgid "By the" +msgstr "Przez" + +msgid "next page" +msgstr "Następna strona" diff --git a/dev/docs/html/_static/locales/pt/LC_MESSAGES/booktheme.mo b/dev/docs/html/_static/locales/pt/LC_MESSAGES/booktheme.mo new file mode 100644 index 0000000..d0ddb87 Binary files /dev/null and b/dev/docs/html/_static/locales/pt/LC_MESSAGES/booktheme.mo differ diff --git a/dev/docs/html/_static/locales/pt/LC_MESSAGES/booktheme.po b/dev/docs/html/_static/locales/pt/LC_MESSAGES/booktheme.po new file mode 100644 index 0000000..1b27314 --- /dev/null +++ b/dev/docs/html/_static/locales/pt/LC_MESSAGES/booktheme.po @@ -0,0 +1,75 @@ + +msgid "" +msgstr "" +"Project-Id-Version: Sphinx-Book-Theme\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: pt\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +msgid "Print to PDF" +msgstr "Imprimir em PDF" + +msgid "Theme by the" +msgstr "Tema por" + +msgid "Download source file" +msgstr "Baixar arquivo fonte" + +msgid "open issue" +msgstr "questão aberta" + +msgid "Contents" +msgstr "Conteúdo" + +msgid "previous page" +msgstr "página anterior" + +msgid "Download notebook file" +msgstr "Baixar arquivo de notebook" + +msgid "Copyright" +msgstr "direito autoral" + +msgid "Download this page" +msgstr "Baixe esta página" + +msgid "Source repository" +msgstr "Repositório fonte" + +msgid "By" +msgstr "De" + +msgid "repository" +msgstr "repositório" + +msgid "Last updated on" +msgstr "Última atualização em" + +msgid "Toggle navigation" +msgstr "Alternar de navegação" + +msgid "Sphinx Book Theme" +msgstr "Tema do livro Sphinx" + +msgid "suggest edit" +msgstr "sugerir edição" + +msgid "Open an issue" +msgstr "Abra um problema" + +msgid "Launch" +msgstr "Lançamento" + +msgid "Fullscreen mode" +msgstr "Modo tela cheia" + +msgid "Edit this page" +msgstr "Edite essa página" + +msgid "By the" +msgstr "Pelo" + +msgid "next page" +msgstr "próxima página" diff --git a/dev/docs/html/_static/locales/ro/LC_MESSAGES/booktheme.mo b/dev/docs/html/_static/locales/ro/LC_MESSAGES/booktheme.mo new file mode 100644 index 0000000..3c36ab1 Binary files /dev/null and b/dev/docs/html/_static/locales/ro/LC_MESSAGES/booktheme.mo differ diff --git a/dev/docs/html/_static/locales/ro/LC_MESSAGES/booktheme.po b/dev/docs/html/_static/locales/ro/LC_MESSAGES/booktheme.po new file mode 100644 index 0000000..1783ad2 --- /dev/null +++ b/dev/docs/html/_static/locales/ro/LC_MESSAGES/booktheme.po @@ -0,0 +1,75 @@ + +msgid "" +msgstr "" +"Project-Id-Version: Sphinx-Book-Theme\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: ro\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +msgid "Print to PDF" +msgstr "Imprimați în PDF" + +msgid "Theme by the" +msgstr "Tema de" + +msgid "Download source file" +msgstr "Descărcați fișierul sursă" + +msgid "open issue" +msgstr "problema deschisă" + +msgid "Contents" +msgstr "Cuprins" + +msgid "previous page" +msgstr "pagina anterioară" + +msgid "Download notebook file" +msgstr "Descărcați fișierul notebook" + 
+msgid "Copyright" +msgstr "Drepturi de autor" + +msgid "Download this page" +msgstr "Descarcă această pagină" + +msgid "Source repository" +msgstr "Depozit sursă" + +msgid "By" +msgstr "De" + +msgid "repository" +msgstr "repertoriu" + +msgid "Last updated on" +msgstr "Ultima actualizare la" + +msgid "Toggle navigation" +msgstr "Comutare navigare" + +msgid "Sphinx Book Theme" +msgstr "Tema Sphinx Book" + +msgid "suggest edit" +msgstr "sugerează editare" + +msgid "Open an issue" +msgstr "Deschideți o problemă" + +msgid "Launch" +msgstr "Lansa" + +msgid "Fullscreen mode" +msgstr "Modul ecran întreg" + +msgid "Edit this page" +msgstr "Editați această pagină" + +msgid "By the" +msgstr "Langa" + +msgid "next page" +msgstr "pagina următoare" diff --git a/dev/docs/html/_static/locales/ru/LC_MESSAGES/booktheme.mo b/dev/docs/html/_static/locales/ru/LC_MESSAGES/booktheme.mo new file mode 100644 index 0000000..6b8ca41 Binary files /dev/null and b/dev/docs/html/_static/locales/ru/LC_MESSAGES/booktheme.mo differ diff --git a/dev/docs/html/_static/locales/ru/LC_MESSAGES/booktheme.po b/dev/docs/html/_static/locales/ru/LC_MESSAGES/booktheme.po new file mode 100644 index 0000000..b1176b7 --- /dev/null +++ b/dev/docs/html/_static/locales/ru/LC_MESSAGES/booktheme.po @@ -0,0 +1,75 @@ + +msgid "" +msgstr "" +"Project-Id-Version: Sphinx-Book-Theme\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: ru\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +msgid "Print to PDF" +msgstr "Распечатать в PDF" + +msgid "Theme by the" +msgstr "Тема от" + +msgid "Download source file" +msgstr "Скачать исходный файл" + +msgid "open issue" +msgstr "открытый вопрос" + +msgid "Contents" +msgstr "Содержание" + +msgid "previous page" +msgstr "Предыдущая страница" + +msgid "Download notebook file" +msgstr "Скачать файл записной книжки" + +msgid "Copyright" +msgstr "авторское право" + +msgid "Download this page" +msgstr "Загрузите эту страницу" + +msgid "Source repository" +msgstr "Исходный репозиторий" + +msgid "By" +msgstr "По" + +msgid "repository" +msgstr "хранилище" + +msgid "Last updated on" +msgstr "Последнее обновление" + +msgid "Toggle navigation" +msgstr "Переключить навигацию" + +msgid "Sphinx Book Theme" +msgstr "Тема книги Сфинкс" + +msgid "suggest edit" +msgstr "предложить редактировать" + +msgid "Open an issue" +msgstr "Открыть вопрос" + +msgid "Launch" +msgstr "Запуск" + +msgid "Fullscreen mode" +msgstr "Полноэкранный режим" + +msgid "Edit this page" +msgstr "Редактировать эту страницу" + +msgid "By the" +msgstr "Посредством" + +msgid "next page" +msgstr "Следующая страница" diff --git a/dev/docs/html/_static/locales/sk/LC_MESSAGES/booktheme.mo b/dev/docs/html/_static/locales/sk/LC_MESSAGES/booktheme.mo new file mode 100644 index 0000000..59bd0dd Binary files /dev/null and b/dev/docs/html/_static/locales/sk/LC_MESSAGES/booktheme.mo differ diff --git a/dev/docs/html/_static/locales/sk/LC_MESSAGES/booktheme.po b/dev/docs/html/_static/locales/sk/LC_MESSAGES/booktheme.po new file mode 100644 index 0000000..6501288 --- /dev/null +++ b/dev/docs/html/_static/locales/sk/LC_MESSAGES/booktheme.po @@ -0,0 +1,75 @@ + +msgid "" +msgstr "" +"Project-Id-Version: Sphinx-Book-Theme\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: sk\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +msgid "Print to PDF" +msgstr "Tlač do PDF" + +msgid "Theme by the" +msgstr "Téma od" + +msgid "Download 
source file" +msgstr "Stiahnite si zdrojový súbor" + +msgid "open issue" +msgstr "otvorené vydanie" + +msgid "Contents" +msgstr "Obsah" + +msgid "previous page" +msgstr "predchádzajúca strana" + +msgid "Download notebook file" +msgstr "Stiahnite si zošit" + +msgid "Copyright" +msgstr "Autorské práva" + +msgid "Download this page" +msgstr "Stiahnite si túto stránku" + +msgid "Source repository" +msgstr "Zdrojové úložisko" + +msgid "By" +msgstr "Autor:" + +msgid "repository" +msgstr "Úložisko" + +msgid "Last updated on" +msgstr "Posledná aktualizácia dňa" + +msgid "Toggle navigation" +msgstr "Prepnúť navigáciu" + +msgid "Sphinx Book Theme" +msgstr "Téma knihy Sfinga" + +msgid "suggest edit" +msgstr "navrhnúť úpravu" + +msgid "Open an issue" +msgstr "Otvorte problém" + +msgid "Launch" +msgstr "Spustiť" + +msgid "Fullscreen mode" +msgstr "Režim celej obrazovky" + +msgid "Edit this page" +msgstr "Upraviť túto stránku" + +msgid "By the" +msgstr "Podľa" + +msgid "next page" +msgstr "ďalšia strana" diff --git a/dev/docs/html/_static/locales/sl/LC_MESSAGES/booktheme.mo b/dev/docs/html/_static/locales/sl/LC_MESSAGES/booktheme.mo new file mode 100644 index 0000000..87bf26d Binary files /dev/null and b/dev/docs/html/_static/locales/sl/LC_MESSAGES/booktheme.mo differ diff --git a/dev/docs/html/_static/locales/sl/LC_MESSAGES/booktheme.po b/dev/docs/html/_static/locales/sl/LC_MESSAGES/booktheme.po new file mode 100644 index 0000000..3c7e3a8 --- /dev/null +++ b/dev/docs/html/_static/locales/sl/LC_MESSAGES/booktheme.po @@ -0,0 +1,75 @@ + +msgid "" +msgstr "" +"Project-Id-Version: Sphinx-Book-Theme\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: sl\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +msgid "Print to PDF" +msgstr "Natisni v PDF" + +msgid "Theme by the" +msgstr "Tema avtorja" + +msgid "Download source file" +msgstr "Prenesite izvorno datoteko" + +msgid "open issue" +msgstr "odprto vprašanje" + +msgid "Contents" +msgstr "Vsebina" + +msgid "previous page" +msgstr "Prejšnja stran" + +msgid "Download notebook file" +msgstr "Prenesite datoteko zvezka" + +msgid "Copyright" +msgstr "avtorske pravice" + +msgid "Download this page" +msgstr "Prenesite to stran" + +msgid "Source repository" +msgstr "Izvorno skladišče" + +msgid "By" +msgstr "Avtor" + +msgid "repository" +msgstr "odlagališče" + +msgid "Last updated on" +msgstr "Nazadnje posodobljeno dne" + +msgid "Toggle navigation" +msgstr "Preklopi navigacijo" + +msgid "Sphinx Book Theme" +msgstr "Tema knjige Sphinx" + +msgid "suggest edit" +msgstr "predlagajte urejanje" + +msgid "Open an issue" +msgstr "Odprite številko" + +msgid "Launch" +msgstr "Kosilo" + +msgid "Fullscreen mode" +msgstr "Celozaslonski način" + +msgid "Edit this page" +msgstr "Uredite to stran" + +msgid "By the" +msgstr "Avtor" + +msgid "next page" +msgstr "Naslednja stran" diff --git a/dev/docs/html/_static/locales/sr/LC_MESSAGES/booktheme.mo b/dev/docs/html/_static/locales/sr/LC_MESSAGES/booktheme.mo new file mode 100644 index 0000000..ec740f4 Binary files /dev/null and b/dev/docs/html/_static/locales/sr/LC_MESSAGES/booktheme.mo differ diff --git a/dev/docs/html/_static/locales/sr/LC_MESSAGES/booktheme.po b/dev/docs/html/_static/locales/sr/LC_MESSAGES/booktheme.po new file mode 100644 index 0000000..773b8ad --- /dev/null +++ b/dev/docs/html/_static/locales/sr/LC_MESSAGES/booktheme.po @@ -0,0 +1,75 @@ + +msgid "" +msgstr "" +"Project-Id-Version: Sphinx-Book-Theme\n" +"MIME-Version: 1.0\n" +"Content-Type: 
text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: sr\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +msgid "Print to PDF" +msgstr "Испис у ПДФ" + +msgid "Theme by the" +msgstr "Тхеме би" + +msgid "Download source file" +msgstr "Преузми изворну датотеку" + +msgid "open issue" +msgstr "отворено издање" + +msgid "Contents" +msgstr "Садржај" + +msgid "previous page" +msgstr "Претходна страница" + +msgid "Download notebook file" +msgstr "Преузмите датотеку бележнице" + +msgid "Copyright" +msgstr "Ауторско право" + +msgid "Download this page" +msgstr "Преузмите ову страницу" + +msgid "Source repository" +msgstr "Изворно спремиште" + +msgid "By" +msgstr "Од стране" + +msgid "repository" +msgstr "спремиште" + +msgid "Last updated on" +msgstr "Последње ажурирање" + +msgid "Toggle navigation" +msgstr "Укључи / искључи навигацију" + +msgid "Sphinx Book Theme" +msgstr "Тема књиге Спхинк" + +msgid "suggest edit" +msgstr "предложи уређивање" + +msgid "Open an issue" +msgstr "Отворите издање" + +msgid "Launch" +msgstr "Лансирање" + +msgid "Fullscreen mode" +msgstr "Режим целог екрана" + +msgid "Edit this page" +msgstr "Уредите ову страницу" + +msgid "By the" +msgstr "Од" + +msgid "next page" +msgstr "Следећа страна" diff --git a/dev/docs/html/_static/locales/sv/LC_MESSAGES/booktheme.mo b/dev/docs/html/_static/locales/sv/LC_MESSAGES/booktheme.mo new file mode 100644 index 0000000..b07dc76 Binary files /dev/null and b/dev/docs/html/_static/locales/sv/LC_MESSAGES/booktheme.mo differ diff --git a/dev/docs/html/_static/locales/sv/LC_MESSAGES/booktheme.po b/dev/docs/html/_static/locales/sv/LC_MESSAGES/booktheme.po new file mode 100644 index 0000000..bcac54c --- /dev/null +++ b/dev/docs/html/_static/locales/sv/LC_MESSAGES/booktheme.po @@ -0,0 +1,75 @@ + +msgid "" +msgstr "" +"Project-Id-Version: Sphinx-Book-Theme\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: sv\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +msgid "Print to PDF" +msgstr "Skriv ut till PDF" + +msgid "Theme by the" +msgstr "Tema av" + +msgid "Download source file" +msgstr "Ladda ner källfil" + +msgid "open issue" +msgstr "öppna problemrapport" + +msgid "Contents" +msgstr "Innehåll" + +msgid "previous page" +msgstr "föregående sida" + +msgid "Download notebook file" +msgstr "Ladda ner notebook-fil" + +msgid "Copyright" +msgstr "Upphovsrätt" + +msgid "Download this page" +msgstr "Ladda ner den här sidan" + +msgid "Source repository" +msgstr "Källkodsrepositorium" + +msgid "By" +msgstr "Av" + +msgid "repository" +msgstr "repositorium" + +msgid "Last updated on" +msgstr "Senast uppdaterad den" + +msgid "Toggle navigation" +msgstr "Växla navigering" + +msgid "Sphinx Book Theme" +msgstr "Sphinx Boktema" + +msgid "suggest edit" +msgstr "föreslå ändring" + +msgid "Open an issue" +msgstr "Öppna en problemrapport" + +msgid "Launch" +msgstr "Öppna" + +msgid "Fullscreen mode" +msgstr "Fullskärmsläge" + +msgid "Edit this page" +msgstr "Redigera den här sidan" + +msgid "By the" +msgstr "Av den" + +msgid "next page" +msgstr "nästa sida" diff --git a/dev/docs/html/_static/locales/ta/LC_MESSAGES/booktheme.mo b/dev/docs/html/_static/locales/ta/LC_MESSAGES/booktheme.mo new file mode 100644 index 0000000..29f52e1 Binary files /dev/null and b/dev/docs/html/_static/locales/ta/LC_MESSAGES/booktheme.mo differ diff --git a/dev/docs/html/_static/locales/ta/LC_MESSAGES/booktheme.po b/dev/docs/html/_static/locales/ta/LC_MESSAGES/booktheme.po new file mode 100644 
index 0000000..b48bdfa --- /dev/null +++ b/dev/docs/html/_static/locales/ta/LC_MESSAGES/booktheme.po @@ -0,0 +1,66 @@ + +msgid "" +msgstr "" +"Project-Id-Version: Sphinx-Book-Theme\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: ta\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +msgid "Print to PDF" +msgstr "PDF இல் அச்சிடுக" + +msgid "Theme by the" +msgstr "வழங்கிய தீம்" + +msgid "Download source file" +msgstr "மூல கோப்பைப் பதிவிறக்குக" + +msgid "open issue" +msgstr "திறந்த பிரச்சினை" + +msgid "previous page" +msgstr "முந்தைய பக்கம்" + +msgid "Download notebook file" +msgstr "நோட்புக் கோப்பைப் பதிவிறக்கவும்" + +msgid "Copyright" +msgstr "பதிப்புரிமை" + +msgid "Download this page" +msgstr "இந்தப் பக்கத்தைப் பதிவிறக்கவும்" + +msgid "Source repository" +msgstr "மூல களஞ்சியம்" + +msgid "By" +msgstr "வழங்கியவர்" + +msgid "Last updated on" +msgstr "கடைசியாக புதுப்பிக்கப்பட்டது" + +msgid "Toggle navigation" +msgstr "வழிசெலுத்தலை நிலைமாற்று" + +msgid "Sphinx Book Theme" +msgstr "ஸ்பிங்க்ஸ் புத்தக தீம்" + +msgid "suggest edit" +msgstr "திருத்த பரிந்துரைக்கவும்" + +msgid "Open an issue" +msgstr "சிக்கலைத் திறக்கவும்" + +msgid "Launch" +msgstr "தொடங்க" + +msgid "Edit this page" +msgstr "இந்தப் பக்கத்தைத் திருத்தவும்" + +msgid "By the" +msgstr "மூலம்" + +msgid "next page" +msgstr "அடுத்த பக்கம்" diff --git a/dev/docs/html/_static/locales/te/LC_MESSAGES/booktheme.mo b/dev/docs/html/_static/locales/te/LC_MESSAGES/booktheme.mo new file mode 100644 index 0000000..0a5f4b4 Binary files /dev/null and b/dev/docs/html/_static/locales/te/LC_MESSAGES/booktheme.mo differ diff --git a/dev/docs/html/_static/locales/te/LC_MESSAGES/booktheme.po b/dev/docs/html/_static/locales/te/LC_MESSAGES/booktheme.po new file mode 100644 index 0000000..952278f --- /dev/null +++ b/dev/docs/html/_static/locales/te/LC_MESSAGES/booktheme.po @@ -0,0 +1,66 @@ + +msgid "" +msgstr "" +"Project-Id-Version: Sphinx-Book-Theme\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: te\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +msgid "Print to PDF" +msgstr "PDF కి ముద్రించండి" + +msgid "Theme by the" +msgstr "ద్వారా థీమ్" + +msgid "Download source file" +msgstr "మూల ఫైల్‌ను డౌన్‌లోడ్ చేయండి" + +msgid "open issue" +msgstr "ఓపెన్ ఇష్యూ" + +msgid "previous page" +msgstr "ముందు పేజి" + +msgid "Download notebook file" +msgstr "నోట్బుక్ ఫైల్ను డౌన్లోడ్ చేయండి" + +msgid "Copyright" +msgstr "కాపీరైట్" + +msgid "Download this page" +msgstr "ఈ పేజీని డౌన్‌లోడ్ చేయండి" + +msgid "Source repository" +msgstr "మూల రిపోజిటరీ" + +msgid "By" +msgstr "ద్వారా" + +msgid "Last updated on" +msgstr "చివరిగా నవీకరించబడింది" + +msgid "Toggle navigation" +msgstr "నావిగేషన్‌ను టోగుల్ చేయండి" + +msgid "Sphinx Book Theme" +msgstr "సింహిక పుస్తక థీమ్" + +msgid "suggest edit" +msgstr "సవరించమని సూచించండి" + +msgid "Open an issue" +msgstr "సమస్యను తెరవండి" + +msgid "Launch" +msgstr "ప్రారంభించండి" + +msgid "Edit this page" +msgstr "ఈ పేజీని సవరించండి" + +msgid "By the" +msgstr "ద్వారా" + +msgid "next page" +msgstr "తరువాతి పేజీ" diff --git a/dev/docs/html/_static/locales/tg/LC_MESSAGES/booktheme.mo b/dev/docs/html/_static/locales/tg/LC_MESSAGES/booktheme.mo new file mode 100644 index 0000000..b21c6c6 Binary files /dev/null and b/dev/docs/html/_static/locales/tg/LC_MESSAGES/booktheme.mo differ diff --git a/dev/docs/html/_static/locales/tg/LC_MESSAGES/booktheme.po b/dev/docs/html/_static/locales/tg/LC_MESSAGES/booktheme.po new 
file mode 100644 index 0000000..c33dc42 --- /dev/null +++ b/dev/docs/html/_static/locales/tg/LC_MESSAGES/booktheme.po @@ -0,0 +1,75 @@ + +msgid "" +msgstr "" +"Project-Id-Version: Sphinx-Book-Theme\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: tg\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +msgid "Print to PDF" +msgstr "Чоп ба PDF" + +msgid "Theme by the" +msgstr "Мавзӯъи аз" + +msgid "Download source file" +msgstr "Файли манбаъро зеркашӣ кунед" + +msgid "open issue" +msgstr "барориши кушод" + +msgid "Contents" +msgstr "Мундариҷа" + +msgid "previous page" +msgstr "саҳифаи қаблӣ" + +msgid "Download notebook file" +msgstr "Файли дафтарро зеркашӣ кунед" + +msgid "Copyright" +msgstr "Ҳуқуқи муаллиф" + +msgid "Download this page" +msgstr "Ин саҳифаро зеркашӣ кунед" + +msgid "Source repository" +msgstr "Анбори манбаъ" + +msgid "By" +msgstr "Бо" + +msgid "repository" +msgstr "анбор" + +msgid "Last updated on" +msgstr "Last навсозӣ дар" + +msgid "Toggle navigation" +msgstr "Гузаришро иваз кунед" + +msgid "Sphinx Book Theme" +msgstr "Сфинкс Мавзӯи китоб" + +msgid "suggest edit" +msgstr "пешниҳод вироиш" + +msgid "Open an issue" +msgstr "Масъаларо кушоед" + +msgid "Launch" +msgstr "Оғоз" + +msgid "Fullscreen mode" +msgstr "Ҳолати экрани пурра" + +msgid "Edit this page" +msgstr "Ин саҳифаро таҳрир кунед" + +msgid "By the" +msgstr "Бо" + +msgid "next page" +msgstr "саҳифаи оянда" diff --git a/dev/docs/html/_static/locales/th/LC_MESSAGES/booktheme.mo b/dev/docs/html/_static/locales/th/LC_MESSAGES/booktheme.mo new file mode 100644 index 0000000..abede98 Binary files /dev/null and b/dev/docs/html/_static/locales/th/LC_MESSAGES/booktheme.mo differ diff --git a/dev/docs/html/_static/locales/th/LC_MESSAGES/booktheme.po b/dev/docs/html/_static/locales/th/LC_MESSAGES/booktheme.po new file mode 100644 index 0000000..9d24294 --- /dev/null +++ b/dev/docs/html/_static/locales/th/LC_MESSAGES/booktheme.po @@ -0,0 +1,75 @@ + +msgid "" +msgstr "" +"Project-Id-Version: Sphinx-Book-Theme\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: th\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +msgid "Print to PDF" +msgstr "พิมพ์เป็น PDF" + +msgid "Theme by the" +msgstr "ธีมโดย" + +msgid "Download source file" +msgstr "ดาวน์โหลดไฟล์ต้นฉบับ" + +msgid "open issue" +msgstr "เปิดปัญหา" + +msgid "Contents" +msgstr "สารบัญ" + +msgid "previous page" +msgstr "หน้าที่แล้ว" + +msgid "Download notebook file" +msgstr "ดาวน์โหลดไฟล์สมุดบันทึก" + +msgid "Copyright" +msgstr "ลิขสิทธิ์" + +msgid "Download this page" +msgstr "ดาวน์โหลดหน้านี้" + +msgid "Source repository" +msgstr "ที่เก็บซอร์ส" + +msgid "By" +msgstr "โดย" + +msgid "repository" +msgstr "ที่เก็บ" + +msgid "Last updated on" +msgstr "ปรับปรุงล่าสุดเมื่อ" + +msgid "Toggle navigation" +msgstr "ไม่ต้องสลับช่องทาง" + +msgid "Sphinx Book Theme" +msgstr "ธีมหนังสือสฟิงซ์" + +msgid "suggest edit" +msgstr "แนะนำแก้ไข" + +msgid "Open an issue" +msgstr "เปิดปัญหา" + +msgid "Launch" +msgstr "เปิด" + +msgid "Fullscreen mode" +msgstr "โหมดเต็มหน้าจอ" + +msgid "Edit this page" +msgstr "แก้ไขหน้านี้" + +msgid "By the" +msgstr "โดย" + +msgid "next page" +msgstr "หน้าต่อไป" diff --git a/dev/docs/html/_static/locales/tl/LC_MESSAGES/booktheme.mo b/dev/docs/html/_static/locales/tl/LC_MESSAGES/booktheme.mo new file mode 100644 index 0000000..8df1b73 Binary files /dev/null and b/dev/docs/html/_static/locales/tl/LC_MESSAGES/booktheme.mo differ 
diff --git a/dev/docs/html/_static/locales/tl/LC_MESSAGES/booktheme.po b/dev/docs/html/_static/locales/tl/LC_MESSAGES/booktheme.po new file mode 100644 index 0000000..20e0d07 --- /dev/null +++ b/dev/docs/html/_static/locales/tl/LC_MESSAGES/booktheme.po @@ -0,0 +1,66 @@ + +msgid "" +msgstr "" +"Project-Id-Version: Sphinx-Book-Theme\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: tl\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +msgid "Print to PDF" +msgstr "I-print sa PDF" + +msgid "Theme by the" +msgstr "Tema ng" + +msgid "Download source file" +msgstr "Mag-download ng file ng pinagmulan" + +msgid "open issue" +msgstr "bukas na isyu" + +msgid "previous page" +msgstr "Nakaraang pahina" + +msgid "Download notebook file" +msgstr "Mag-download ng file ng notebook" + +msgid "Copyright" +msgstr "Copyright" + +msgid "Download this page" +msgstr "I-download ang pahinang ito" + +msgid "Source repository" +msgstr "Pinagmulan ng imbakan" + +msgid "By" +msgstr "Ni" + +msgid "Last updated on" +msgstr "Huling na-update noong" + +msgid "Toggle navigation" +msgstr "I-toggle ang pag-navigate" + +msgid "Sphinx Book Theme" +msgstr "Tema ng Sphinx Book" + +msgid "suggest edit" +msgstr "iminumungkahi i-edit" + +msgid "Open an issue" +msgstr "Magbukas ng isyu" + +msgid "Launch" +msgstr "Ilunsad" + +msgid "Edit this page" +msgstr "I-edit ang pahinang ito" + +msgid "By the" +msgstr "Sa pamamagitan ng" + +msgid "next page" +msgstr "Susunod na pahina" diff --git a/dev/docs/html/_static/locales/tr/LC_MESSAGES/booktheme.mo b/dev/docs/html/_static/locales/tr/LC_MESSAGES/booktheme.mo new file mode 100644 index 0000000..029ae18 Binary files /dev/null and b/dev/docs/html/_static/locales/tr/LC_MESSAGES/booktheme.mo differ diff --git a/dev/docs/html/_static/locales/tr/LC_MESSAGES/booktheme.po b/dev/docs/html/_static/locales/tr/LC_MESSAGES/booktheme.po new file mode 100644 index 0000000..a77eb02 --- /dev/null +++ b/dev/docs/html/_static/locales/tr/LC_MESSAGES/booktheme.po @@ -0,0 +1,75 @@ + +msgid "" +msgstr "" +"Project-Id-Version: Sphinx-Book-Theme\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: tr\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +msgid "Print to PDF" +msgstr "PDF olarak yazdır" + +msgid "Theme by the" +msgstr "Tarafından tema" + +msgid "Download source file" +msgstr "Kaynak dosyayı indirin" + +msgid "open issue" +msgstr "Açık konu" + +msgid "Contents" +msgstr "İçindekiler" + +msgid "previous page" +msgstr "önceki sayfa" + +msgid "Download notebook file" +msgstr "Defter dosyasını indirin" + +msgid "Copyright" +msgstr "Telif hakkı" + +msgid "Download this page" +msgstr "Bu sayfayı indirin" + +msgid "Source repository" +msgstr "Kaynak kod deposu" + +msgid "By" +msgstr "Tarafından" + +msgid "repository" +msgstr "depo" + +msgid "Last updated on" +msgstr "Son güncelleme tarihi" + +msgid "Toggle navigation" +msgstr "Gezinmeyi değiştir" + +msgid "Sphinx Book Theme" +msgstr "Sfenks Kitap Teması" + +msgid "suggest edit" +msgstr "düzenleme öner" + +msgid "Open an issue" +msgstr "Bir sorunu açın" + +msgid "Launch" +msgstr "Başlatmak" + +msgid "Fullscreen mode" +msgstr "Tam ekran modu" + +msgid "Edit this page" +msgstr "Bu sayfayı düzenle" + +msgid "By the" +msgstr "Tarafından" + +msgid "next page" +msgstr "sonraki Sayfa" diff --git a/dev/docs/html/_static/locales/uk/LC_MESSAGES/booktheme.mo b/dev/docs/html/_static/locales/uk/LC_MESSAGES/booktheme.mo new file mode 
100644 index 0000000..16ab789 Binary files /dev/null and b/dev/docs/html/_static/locales/uk/LC_MESSAGES/booktheme.mo differ diff --git a/dev/docs/html/_static/locales/uk/LC_MESSAGES/booktheme.po b/dev/docs/html/_static/locales/uk/LC_MESSAGES/booktheme.po new file mode 100644 index 0000000..993dd07 --- /dev/null +++ b/dev/docs/html/_static/locales/uk/LC_MESSAGES/booktheme.po @@ -0,0 +1,75 @@ + +msgid "" +msgstr "" +"Project-Id-Version: Sphinx-Book-Theme\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: uk\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +msgid "Print to PDF" +msgstr "Друк у форматі PDF" + +msgid "Theme by the" +msgstr "Тема від" + +msgid "Download source file" +msgstr "Завантажити вихідний файл" + +msgid "open issue" +msgstr "відкритий випуск" + +msgid "Contents" +msgstr "Зміст" + +msgid "previous page" +msgstr "Попередня сторінка" + +msgid "Download notebook file" +msgstr "Завантажте файл блокнота" + +msgid "Copyright" +msgstr "Авторське право" + +msgid "Download this page" +msgstr "Завантажте цю сторінку" + +msgid "Source repository" +msgstr "Джерело сховища" + +msgid "By" +msgstr "Автор" + +msgid "repository" +msgstr "сховище" + +msgid "Last updated on" +msgstr "Останнє оновлення:" + +msgid "Toggle navigation" +msgstr "Переключити навігацію" + +msgid "Sphinx Book Theme" +msgstr "Тема книги \"Сфінкс\"" + +msgid "suggest edit" +msgstr "запропонувати редагувати" + +msgid "Open an issue" +msgstr "Відкрийте випуск" + +msgid "Launch" +msgstr "Запуск" + +msgid "Fullscreen mode" +msgstr "Повноекранний режим" + +msgid "Edit this page" +msgstr "Редагувати цю сторінку" + +msgid "By the" +msgstr "По" + +msgid "next page" +msgstr "Наступна сторінка" diff --git a/dev/docs/html/_static/locales/ur/LC_MESSAGES/booktheme.mo b/dev/docs/html/_static/locales/ur/LC_MESSAGES/booktheme.mo new file mode 100644 index 0000000..de8c84b Binary files /dev/null and b/dev/docs/html/_static/locales/ur/LC_MESSAGES/booktheme.mo differ diff --git a/dev/docs/html/_static/locales/ur/LC_MESSAGES/booktheme.po b/dev/docs/html/_static/locales/ur/LC_MESSAGES/booktheme.po new file mode 100644 index 0000000..2f77426 --- /dev/null +++ b/dev/docs/html/_static/locales/ur/LC_MESSAGES/booktheme.po @@ -0,0 +1,66 @@ + +msgid "" +msgstr "" +"Project-Id-Version: Sphinx-Book-Theme\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: ur\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +msgid "Print to PDF" +msgstr "پی ڈی ایف پرنٹ کریں" + +msgid "Theme by the" +msgstr "کے ذریعہ تھیم" + +msgid "Download source file" +msgstr "سورس فائل ڈاؤن لوڈ کریں" + +msgid "open issue" +msgstr "کھلا مسئلہ" + +msgid "previous page" +msgstr "سابقہ ​​صفحہ" + +msgid "Download notebook file" +msgstr "نوٹ بک فائل ڈاؤن لوڈ کریں" + +msgid "Copyright" +msgstr "کاپی رائٹ" + +msgid "Download this page" +msgstr "اس صفحے کو ڈاؤن لوڈ کریں" + +msgid "Source repository" +msgstr "ماخذ ذخیرہ" + +msgid "By" +msgstr "بذریعہ" + +msgid "Last updated on" +msgstr "آخری بار تازہ کاری ہوئی" + +msgid "Toggle navigation" +msgstr "نیویگیشن ٹوگل کریں" + +msgid "Sphinx Book Theme" +msgstr "سپنکس بک تھیم" + +msgid "suggest edit" +msgstr "ترمیم کی تجویز کریں" + +msgid "Open an issue" +msgstr "ایک مسئلہ کھولیں" + +msgid "Launch" +msgstr "لانچ کریں" + +msgid "Edit this page" +msgstr "اس صفحے میں ترمیم کریں" + +msgid "By the" +msgstr "کی طرف" + +msgid "next page" +msgstr "اگلا صفحہ" diff --git 
a/dev/docs/html/_static/locales/vi/LC_MESSAGES/booktheme.mo b/dev/docs/html/_static/locales/vi/LC_MESSAGES/booktheme.mo new file mode 100644 index 0000000..2bb3255 Binary files /dev/null and b/dev/docs/html/_static/locales/vi/LC_MESSAGES/booktheme.mo differ diff --git a/dev/docs/html/_static/locales/vi/LC_MESSAGES/booktheme.po b/dev/docs/html/_static/locales/vi/LC_MESSAGES/booktheme.po new file mode 100644 index 0000000..33159f3 --- /dev/null +++ b/dev/docs/html/_static/locales/vi/LC_MESSAGES/booktheme.po @@ -0,0 +1,75 @@ + +msgid "" +msgstr "" +"Project-Id-Version: Sphinx-Book-Theme\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: vi\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +msgid "Print to PDF" +msgstr "In sang PDF" + +msgid "Theme by the" +msgstr "Chủ đề của" + +msgid "Download source file" +msgstr "Tải xuống tệp nguồn" + +msgid "open issue" +msgstr "vấn đề mở" + +msgid "Contents" +msgstr "Nội dung" + +msgid "previous page" +msgstr "trang trước" + +msgid "Download notebook file" +msgstr "Tải xuống tệp sổ tay" + +msgid "Copyright" +msgstr "Bản quyền" + +msgid "Download this page" +msgstr "Tải xuống trang này" + +msgid "Source repository" +msgstr "Kho nguồn" + +msgid "By" +msgstr "Bởi" + +msgid "repository" +msgstr "kho" + +msgid "Last updated on" +msgstr "Cập nhật lần cuối vào" + +msgid "Toggle navigation" +msgstr "Chuyển đổi điều hướng thành" + +msgid "Sphinx Book Theme" +msgstr "Chủ đề sách nhân sư" + +msgid "suggest edit" +msgstr "đề nghị chỉnh sửa" + +msgid "Open an issue" +msgstr "Mở một vấn đề" + +msgid "Launch" +msgstr "Phóng" + +msgid "Fullscreen mode" +msgstr "Chế độ toàn màn hình" + +msgid "Edit this page" +msgstr "chỉnh sửa trang này" + +msgid "By the" +msgstr "Bằng" + +msgid "next page" +msgstr "Trang tiếp theo" diff --git a/dev/docs/html/_static/locales/zh_CN/LC_MESSAGES/booktheme.mo b/dev/docs/html/_static/locales/zh_CN/LC_MESSAGES/booktheme.mo new file mode 100644 index 0000000..0e3235d Binary files /dev/null and b/dev/docs/html/_static/locales/zh_CN/LC_MESSAGES/booktheme.mo differ diff --git a/dev/docs/html/_static/locales/zh_CN/LC_MESSAGES/booktheme.po b/dev/docs/html/_static/locales/zh_CN/LC_MESSAGES/booktheme.po new file mode 100644 index 0000000..2e519ef --- /dev/null +++ b/dev/docs/html/_static/locales/zh_CN/LC_MESSAGES/booktheme.po @@ -0,0 +1,75 @@ + +msgid "" +msgstr "" +"Project-Id-Version: Sphinx-Book-Theme\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: zh_CN\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +msgid "Print to PDF" +msgstr "列印成 PDF" + +msgid "Theme by the" +msgstr "主题作者:" + +msgid "Download source file" +msgstr "下载源文件" + +msgid "open issue" +msgstr "创建议题" + +msgid "Contents" +msgstr "目录" + +msgid "previous page" +msgstr "上一页" + +msgid "Download notebook file" +msgstr "下载笔记本文件" + +msgid "Copyright" +msgstr "版权" + +msgid "Download this page" +msgstr "下载此页面" + +msgid "Source repository" +msgstr "源码库" + +msgid "By" +msgstr "作者:" + +msgid "repository" +msgstr "仓库" + +msgid "Last updated on" +msgstr "上次更新时间:" + +msgid "Toggle navigation" +msgstr "显示或隐藏导航栏" + +msgid "Sphinx Book Theme" +msgstr "Sphinx Book 主题" + +msgid "suggest edit" +msgstr "提出修改建议" + +msgid "Open an issue" +msgstr "创建议题" + +msgid "Launch" +msgstr "启动" + +msgid "Fullscreen mode" +msgstr "全屏模式" + +msgid "Edit this page" +msgstr "编辑此页面" + +msgid "By the" +msgstr "作者:" + +msgid "next page" +msgstr "下一页" diff --git 
a/dev/docs/html/_static/locales/zh_TW/LC_MESSAGES/booktheme.mo b/dev/docs/html/_static/locales/zh_TW/LC_MESSAGES/booktheme.mo new file mode 100644 index 0000000..9116fa9 Binary files /dev/null and b/dev/docs/html/_static/locales/zh_TW/LC_MESSAGES/booktheme.mo differ diff --git a/dev/docs/html/_static/locales/zh_TW/LC_MESSAGES/booktheme.po b/dev/docs/html/_static/locales/zh_TW/LC_MESSAGES/booktheme.po new file mode 100644 index 0000000..beecb07 --- /dev/null +++ b/dev/docs/html/_static/locales/zh_TW/LC_MESSAGES/booktheme.po @@ -0,0 +1,75 @@ + +msgid "" +msgstr "" +"Project-Id-Version: Sphinx-Book-Theme\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: zh_TW\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +msgid "Print to PDF" +msgstr "列印成 PDF" + +msgid "Theme by the" +msgstr "佈景主題作者:" + +msgid "Download source file" +msgstr "下載原始檔" + +msgid "open issue" +msgstr "公開的問題" + +msgid "Contents" +msgstr "目錄" + +msgid "previous page" +msgstr "上一頁" + +msgid "Download notebook file" +msgstr "下載 Notebook 檔案" + +msgid "Copyright" +msgstr "Copyright" + +msgid "Download this page" +msgstr "下載此頁面" + +msgid "Source repository" +msgstr "來源儲存庫" + +msgid "By" +msgstr "作者:" + +msgid "repository" +msgstr "儲存庫" + +msgid "Last updated on" +msgstr "最後更新時間:" + +msgid "Toggle navigation" +msgstr "顯示或隱藏導覽列" + +msgid "Sphinx Book Theme" +msgstr "Sphinx Book 佈景主題" + +msgid "suggest edit" +msgstr "提出修改建議" + +msgid "Open an issue" +msgstr "開啟議題" + +msgid "Launch" +msgstr "啟動" + +msgid "Fullscreen mode" +msgstr "全螢幕模式" + +msgid "Edit this page" +msgstr "編輯此頁面" + +msgid "By the" +msgstr "作者:" + +msgid "next page" +msgstr "下一頁" diff --git a/dev/docs/html/_static/minus.png b/dev/docs/html/_static/minus.png new file mode 100644 index 0000000..d96755f Binary files /dev/null and b/dev/docs/html/_static/minus.png differ diff --git a/dev/docs/html/_static/nbsphinx-broken-thumbnail.svg b/dev/docs/html/_static/nbsphinx-broken-thumbnail.svg new file mode 100644 index 0000000..4919ca8 --- /dev/null +++ b/dev/docs/html/_static/nbsphinx-broken-thumbnail.svg @@ -0,0 +1,9 @@ + + + + diff --git a/dev/docs/html/_static/nbsphinx-code-cells.css b/dev/docs/html/_static/nbsphinx-code-cells.css new file mode 100644 index 0000000..a3fb27c --- /dev/null +++ b/dev/docs/html/_static/nbsphinx-code-cells.css @@ -0,0 +1,259 @@ +/* remove conflicting styling from Sphinx themes */ +div.nbinput.container div.prompt *, +div.nboutput.container div.prompt *, +div.nbinput.container div.input_area pre, +div.nboutput.container div.output_area pre, +div.nbinput.container div.input_area .highlight, +div.nboutput.container div.output_area .highlight { + border: none; + padding: 0; + margin: 0; + box-shadow: none; +} + +div.nbinput.container > div[class*=highlight], +div.nboutput.container > div[class*=highlight] { + margin: 0; +} + +div.nbinput.container div.prompt *, +div.nboutput.container div.prompt * { + background: none; +} + +div.nboutput.container div.output_area .highlight, +div.nboutput.container div.output_area pre { + background: unset; +} + +div.nboutput.container div.output_area div.highlight { + color: unset; /* override Pygments text color */ +} + +/* avoid gaps between output lines */ +div.nboutput.container div[class*=highlight] pre { + line-height: normal; +} + +/* input/output containers */ +div.nbinput.container, +div.nboutput.container { + display: -webkit-flex; + display: flex; + align-items: flex-start; + margin: 0; + width: 100%; +} +@media (max-width: 540px) { + 
div.nbinput.container, + div.nboutput.container { + flex-direction: column; + } +} + +/* input container */ +div.nbinput.container { + padding-top: 5px; +} + +/* last container */ +div.nblast.container { + padding-bottom: 5px; +} + +/* input prompt */ +div.nbinput.container div.prompt pre, +/* for sphinx_immaterial theme: */ +div.nbinput.container div.prompt pre > code { + color: #307FC1; +} + +/* output prompt */ +div.nboutput.container div.prompt pre, +/* for sphinx_immaterial theme: */ +div.nboutput.container div.prompt pre > code { + color: #BF5B3D; +} + +/* all prompts */ +div.nbinput.container div.prompt, +div.nboutput.container div.prompt { + width: 4.5ex; + padding-top: 5px; + position: relative; + user-select: none; +} + +div.nbinput.container div.prompt > div, +div.nboutput.container div.prompt > div { + position: absolute; + right: 0; + margin-right: 0.3ex; +} + +@media (max-width: 540px) { + div.nbinput.container div.prompt, + div.nboutput.container div.prompt { + width: unset; + text-align: left; + padding: 0.4em; + } + div.nboutput.container div.prompt.empty { + padding: 0; + } + + div.nbinput.container div.prompt > div, + div.nboutput.container div.prompt > div { + position: unset; + } +} + +/* disable scrollbars and line breaks on prompts */ +div.nbinput.container div.prompt pre, +div.nboutput.container div.prompt pre { + overflow: hidden; + white-space: pre; +} + +/* input/output area */ +div.nbinput.container div.input_area, +div.nboutput.container div.output_area { + -webkit-flex: 1; + flex: 1; + overflow: auto; +} +@media (max-width: 540px) { + div.nbinput.container div.input_area, + div.nboutput.container div.output_area { + width: 100%; + } +} + +/* input area */ +div.nbinput.container div.input_area { + border: 1px solid #e0e0e0; + border-radius: 2px; + /*background: #f5f5f5;*/ +} + +/* override MathJax center alignment in output cells */ +div.nboutput.container div[class*=MathJax] { + text-align: left !important; +} + +/* override sphinx.ext.imgmath center alignment in output cells */ +div.nboutput.container div.math p { + text-align: left; +} + +/* standard error */ +div.nboutput.container div.output_area.stderr { + background: #fdd; +} + +/* ANSI colors */ +.ansi-black-fg { color: #3E424D; } +.ansi-black-bg { background-color: #3E424D; } +.ansi-black-intense-fg { color: #282C36; } +.ansi-black-intense-bg { background-color: #282C36; } +.ansi-red-fg { color: #E75C58; } +.ansi-red-bg { background-color: #E75C58; } +.ansi-red-intense-fg { color: #B22B31; } +.ansi-red-intense-bg { background-color: #B22B31; } +.ansi-green-fg { color: #00A250; } +.ansi-green-bg { background-color: #00A250; } +.ansi-green-intense-fg { color: #007427; } +.ansi-green-intense-bg { background-color: #007427; } +.ansi-yellow-fg { color: #DDB62B; } +.ansi-yellow-bg { background-color: #DDB62B; } +.ansi-yellow-intense-fg { color: #B27D12; } +.ansi-yellow-intense-bg { background-color: #B27D12; } +.ansi-blue-fg { color: #208FFB; } +.ansi-blue-bg { background-color: #208FFB; } +.ansi-blue-intense-fg { color: #0065CA; } +.ansi-blue-intense-bg { background-color: #0065CA; } +.ansi-magenta-fg { color: #D160C4; } +.ansi-magenta-bg { background-color: #D160C4; } +.ansi-magenta-intense-fg { color: #A03196; } +.ansi-magenta-intense-bg { background-color: #A03196; } +.ansi-cyan-fg { color: #60C6C8; } +.ansi-cyan-bg { background-color: #60C6C8; } +.ansi-cyan-intense-fg { color: #258F8F; } +.ansi-cyan-intense-bg { background-color: #258F8F; } +.ansi-white-fg { color: #C5C1B4; } +.ansi-white-bg { 
background-color: #C5C1B4; } +.ansi-white-intense-fg { color: #A1A6B2; } +.ansi-white-intense-bg { background-color: #A1A6B2; } + +.ansi-default-inverse-fg { color: #FFFFFF; } +.ansi-default-inverse-bg { background-color: #000000; } + +.ansi-bold { font-weight: bold; } +.ansi-underline { text-decoration: underline; } + + +div.nbinput.container div.input_area div[class*=highlight] > pre, +div.nboutput.container div.output_area div[class*=highlight] > pre, +div.nboutput.container div.output_area div[class*=highlight].math, +div.nboutput.container div.output_area.rendered_html, +div.nboutput.container div.output_area > div.output_javascript, +div.nboutput.container div.output_area:not(.rendered_html) > img{ + padding: 5px; + margin: 0; +} + +/* fix copybtn overflow problem in chromium (needed for 'sphinx_copybutton') */ +div.nbinput.container div.input_area > div[class^='highlight'], +div.nboutput.container div.output_area > div[class^='highlight']{ + overflow-y: hidden; +} + +/* hide copy button on prompts for 'sphinx_copybutton' extension ... */ +.prompt .copybtn, +/* ... and 'sphinx_immaterial' theme */ +.prompt .md-clipboard.md-icon { + display: none; +} + +/* Some additional styling taken form the Jupyter notebook CSS */ +.jp-RenderedHTMLCommon table, +div.rendered_html table { + border: none; + border-collapse: collapse; + border-spacing: 0; + color: black; + font-size: 12px; + table-layout: fixed; +} +.jp-RenderedHTMLCommon thead, +div.rendered_html thead { + border-bottom: 1px solid black; + vertical-align: bottom; +} +.jp-RenderedHTMLCommon tr, +.jp-RenderedHTMLCommon th, +.jp-RenderedHTMLCommon td, +div.rendered_html tr, +div.rendered_html th, +div.rendered_html td { + text-align: right; + vertical-align: middle; + padding: 0.5em 0.5em; + line-height: normal; + white-space: normal; + max-width: none; + border: none; +} +.jp-RenderedHTMLCommon th, +div.rendered_html th { + font-weight: bold; +} +.jp-RenderedHTMLCommon tbody tr:nth-child(odd), +div.rendered_html tbody tr:nth-child(odd) { + background: #f5f5f5; +} +.jp-RenderedHTMLCommon tbody tr:hover, +div.rendered_html tbody tr:hover { + background: rgba(66, 165, 245, 0.2); +} + diff --git a/dev/docs/html/_static/nbsphinx-gallery.css b/dev/docs/html/_static/nbsphinx-gallery.css new file mode 100644 index 0000000..365c27a --- /dev/null +++ b/dev/docs/html/_static/nbsphinx-gallery.css @@ -0,0 +1,31 @@ +.nbsphinx-gallery { + display: grid; + grid-template-columns: repeat(auto-fill, minmax(160px, 1fr)); + gap: 5px; + margin-top: 1em; + margin-bottom: 1em; +} + +.nbsphinx-gallery > a { + padding: 5px; + border: 1px dotted currentColor; + border-radius: 2px; + text-align: center; +} + +.nbsphinx-gallery > a:hover { + border-style: solid; +} + +.nbsphinx-gallery img { + max-width: 100%; + max-height: 100%; +} + +.nbsphinx-gallery > a > div:first-child { + display: flex; + align-items: start; + justify-content: center; + height: 120px; + margin-bottom: 5px; +} diff --git a/dev/docs/html/_static/nbsphinx-no-thumbnail.svg b/dev/docs/html/_static/nbsphinx-no-thumbnail.svg new file mode 100644 index 0000000..9dca758 --- /dev/null +++ b/dev/docs/html/_static/nbsphinx-no-thumbnail.svg @@ -0,0 +1,9 @@ + + + + diff --git a/dev/docs/html/_static/nonstationary_synthetic_FDC.png b/dev/docs/html/_static/nonstationary_synthetic_FDC.png new file mode 100644 index 0000000..ee1ba6e Binary files /dev/null and b/dev/docs/html/_static/nonstationary_synthetic_FDC.png differ diff --git a/dev/docs/html/_static/notebook_fishery_output_19_0.png 
b/dev/docs/html/_static/notebook_fishery_output_19_0.png new file mode 100644 index 0000000..1881ab4 Binary files /dev/null and b/dev/docs/html/_static/notebook_fishery_output_19_0.png differ diff --git a/dev/docs/html/_static/notebook_fishery_output_5_1.png b/dev/docs/html/_static/notebook_fishery_output_5_1.png new file mode 100644 index 0000000..e2b0354 Binary files /dev/null and b/dev/docs/html/_static/notebook_fishery_output_5_1.png differ diff --git a/dev/docs/html/_static/notebook_logistic_output_11_1.png b/dev/docs/html/_static/notebook_logistic_output_11_1.png new file mode 100644 index 0000000..6f7ca8e Binary files /dev/null and b/dev/docs/html/_static/notebook_logistic_output_11_1.png differ diff --git a/dev/docs/html/_static/output_100_0.png b/dev/docs/html/_static/output_100_0.png new file mode 100644 index 0000000..6bd90df Binary files /dev/null and b/dev/docs/html/_static/output_100_0.png differ diff --git a/dev/docs/html/_static/output_103_0.png b/dev/docs/html/_static/output_103_0.png new file mode 100644 index 0000000..8dba3af Binary files /dev/null and b/dev/docs/html/_static/output_103_0.png differ diff --git a/dev/docs/html/_static/output_106_0.png b/dev/docs/html/_static/output_106_0.png new file mode 100644 index 0000000..f65affd Binary files /dev/null and b/dev/docs/html/_static/output_106_0.png differ diff --git a/dev/docs/html/_static/output_16_0.png b/dev/docs/html/_static/output_16_0.png new file mode 100644 index 0000000..c0cf6aa Binary files /dev/null and b/dev/docs/html/_static/output_16_0.png differ diff --git a/dev/docs/html/_static/output_28_0.png b/dev/docs/html/_static/output_28_0.png new file mode 100644 index 0000000..989fd81 Binary files /dev/null and b/dev/docs/html/_static/output_28_0.png differ diff --git a/dev/docs/html/_static/output_40_0.png b/dev/docs/html/_static/output_40_0.png new file mode 100644 index 0000000..5a15ede Binary files /dev/null and b/dev/docs/html/_static/output_40_0.png differ diff --git a/dev/docs/html/_static/output_49_0.png b/dev/docs/html/_static/output_49_0.png new file mode 100644 index 0000000..263861a Binary files /dev/null and b/dev/docs/html/_static/output_49_0.png differ diff --git a/dev/docs/html/_static/output_52_0.png b/dev/docs/html/_static/output_52_0.png new file mode 100644 index 0000000..b4684b4 Binary files /dev/null and b/dev/docs/html/_static/output_52_0.png differ diff --git a/dev/docs/html/_static/output_59_0.png b/dev/docs/html/_static/output_59_0.png new file mode 100644 index 0000000..3ed812c Binary files /dev/null and b/dev/docs/html/_static/output_59_0.png differ diff --git a/dev/docs/html/_static/output_62_0.png b/dev/docs/html/_static/output_62_0.png new file mode 100644 index 0000000..cc3aaf6 Binary files /dev/null and b/dev/docs/html/_static/output_62_0.png differ diff --git a/dev/docs/html/_static/output_68_0.png b/dev/docs/html/_static/output_68_0.png new file mode 100644 index 0000000..5dc074c Binary files /dev/null and b/dev/docs/html/_static/output_68_0.png differ diff --git a/dev/docs/html/_static/output_71_0.png b/dev/docs/html/_static/output_71_0.png new file mode 100644 index 0000000..cb35a3f Binary files /dev/null and b/dev/docs/html/_static/output_71_0.png differ diff --git a/dev/docs/html/_static/output_7_0.png b/dev/docs/html/_static/output_7_0.png new file mode 100644 index 0000000..bde57bb Binary files /dev/null and b/dev/docs/html/_static/output_7_0.png differ diff --git a/dev/docs/html/_static/output_80_0.png b/dev/docs/html/_static/output_80_0.png new file mode 100644 index 
0000000..a8daab5 Binary files /dev/null and b/dev/docs/html/_static/output_80_0.png differ diff --git a/dev/docs/html/_static/output_82_0.png b/dev/docs/html/_static/output_82_0.png new file mode 100644 index 0000000..07681c3 Binary files /dev/null and b/dev/docs/html/_static/output_82_0.png differ diff --git a/dev/docs/html/_static/output_87_0.png b/dev/docs/html/_static/output_87_0.png new file mode 100644 index 0000000..e5fc38c Binary files /dev/null and b/dev/docs/html/_static/output_87_0.png differ diff --git a/dev/docs/html/_static/output_88_0.png b/dev/docs/html/_static/output_88_0.png new file mode 100644 index 0000000..c34189f Binary files /dev/null and b/dev/docs/html/_static/output_88_0.png differ diff --git a/dev/docs/html/_static/output_94_0.png b/dev/docs/html/_static/output_94_0.png new file mode 100644 index 0000000..8c67a18 Binary files /dev/null and b/dev/docs/html/_static/output_94_0.png differ diff --git a/dev/docs/html/_static/output_99_0.png b/dev/docs/html/_static/output_99_0.png new file mode 100644 index 0000000..571cab3 Binary files /dev/null and b/dev/docs/html/_static/output_99_0.png differ diff --git a/dev/docs/html/_static/plus.png b/dev/docs/html/_static/plus.png new file mode 100644 index 0000000..7107cec Binary files /dev/null and b/dev/docs/html/_static/plus.png differ diff --git a/dev/docs/html/_static/pygments.css b/dev/docs/html/_static/pygments.css new file mode 100644 index 0000000..012e6a0 --- /dev/null +++ b/dev/docs/html/_static/pygments.css @@ -0,0 +1,152 @@ +html[data-theme="light"] .highlight pre { line-height: 125%; } +html[data-theme="light"] .highlight td.linenos .normal { color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px; } +html[data-theme="light"] .highlight span.linenos { color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px; } +html[data-theme="light"] .highlight td.linenos .special { color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px; } +html[data-theme="light"] .highlight span.linenos.special { color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px; } +html[data-theme="light"] .highlight .hll { background-color: #fae4c2 } +html[data-theme="light"] .highlight { background: #fefefe; color: #080808 } +html[data-theme="light"] .highlight .c { color: #515151 } /* Comment */ +html[data-theme="light"] .highlight .err { color: #a12236 } /* Error */ +html[data-theme="light"] .highlight .k { color: #6730c5 } /* Keyword */ +html[data-theme="light"] .highlight .l { color: #7f4707 } /* Literal */ +html[data-theme="light"] .highlight .n { color: #080808 } /* Name */ +html[data-theme="light"] .highlight .o { color: #00622f } /* Operator */ +html[data-theme="light"] .highlight .p { color: #080808 } /* Punctuation */ +html[data-theme="light"] .highlight .ch { color: #515151 } /* Comment.Hashbang */ +html[data-theme="light"] .highlight .cm { color: #515151 } /* Comment.Multiline */ +html[data-theme="light"] .highlight .cp { color: #515151 } /* Comment.Preproc */ +html[data-theme="light"] .highlight .cpf { color: #515151 } /* Comment.PreprocFile */ +html[data-theme="light"] .highlight .c1 { color: #515151 } /* Comment.Single */ +html[data-theme="light"] .highlight .cs { color: #515151 } /* Comment.Special */ +html[data-theme="light"] .highlight .gd { color: #005b82 } /* Generic.Deleted */ +html[data-theme="light"] .highlight .ge { font-style: italic } /* Generic.Emph */ +html[data-theme="light"] .highlight .gh { color: #005b82 } /* 
Generic.Heading */ +html[data-theme="light"] .highlight .gs { font-weight: bold } /* Generic.Strong */ +html[data-theme="light"] .highlight .gu { color: #005b82 } /* Generic.Subheading */ +html[data-theme="light"] .highlight .kc { color: #6730c5 } /* Keyword.Constant */ +html[data-theme="light"] .highlight .kd { color: #6730c5 } /* Keyword.Declaration */ +html[data-theme="light"] .highlight .kn { color: #6730c5 } /* Keyword.Namespace */ +html[data-theme="light"] .highlight .kp { color: #6730c5 } /* Keyword.Pseudo */ +html[data-theme="light"] .highlight .kr { color: #6730c5 } /* Keyword.Reserved */ +html[data-theme="light"] .highlight .kt { color: #7f4707 } /* Keyword.Type */ +html[data-theme="light"] .highlight .ld { color: #7f4707 } /* Literal.Date */ +html[data-theme="light"] .highlight .m { color: #7f4707 } /* Literal.Number */ +html[data-theme="light"] .highlight .s { color: #00622f } /* Literal.String */ +html[data-theme="light"] .highlight .na { color: #912583 } /* Name.Attribute */ +html[data-theme="light"] .highlight .nb { color: #7f4707 } /* Name.Builtin */ +html[data-theme="light"] .highlight .nc { color: #005b82 } /* Name.Class */ +html[data-theme="light"] .highlight .no { color: #005b82 } /* Name.Constant */ +html[data-theme="light"] .highlight .nd { color: #7f4707 } /* Name.Decorator */ +html[data-theme="light"] .highlight .ni { color: #00622f } /* Name.Entity */ +html[data-theme="light"] .highlight .ne { color: #6730c5 } /* Name.Exception */ +html[data-theme="light"] .highlight .nf { color: #005b82 } /* Name.Function */ +html[data-theme="light"] .highlight .nl { color: #7f4707 } /* Name.Label */ +html[data-theme="light"] .highlight .nn { color: #080808 } /* Name.Namespace */ +html[data-theme="light"] .highlight .nx { color: #080808 } /* Name.Other */ +html[data-theme="light"] .highlight .py { color: #005b82 } /* Name.Property */ +html[data-theme="light"] .highlight .nt { color: #005b82 } /* Name.Tag */ +html[data-theme="light"] .highlight .nv { color: #a12236 } /* Name.Variable */ +html[data-theme="light"] .highlight .ow { color: #6730c5 } /* Operator.Word */ +html[data-theme="light"] .highlight .pm { color: #080808 } /* Punctuation.Marker */ +html[data-theme="light"] .highlight .w { color: #080808 } /* Text.Whitespace */ +html[data-theme="light"] .highlight .mb { color: #7f4707 } /* Literal.Number.Bin */ +html[data-theme="light"] .highlight .mf { color: #7f4707 } /* Literal.Number.Float */ +html[data-theme="light"] .highlight .mh { color: #7f4707 } /* Literal.Number.Hex */ +html[data-theme="light"] .highlight .mi { color: #7f4707 } /* Literal.Number.Integer */ +html[data-theme="light"] .highlight .mo { color: #7f4707 } /* Literal.Number.Oct */ +html[data-theme="light"] .highlight .sa { color: #00622f } /* Literal.String.Affix */ +html[data-theme="light"] .highlight .sb { color: #00622f } /* Literal.String.Backtick */ +html[data-theme="light"] .highlight .sc { color: #00622f } /* Literal.String.Char */ +html[data-theme="light"] .highlight .dl { color: #00622f } /* Literal.String.Delimiter */ +html[data-theme="light"] .highlight .sd { color: #00622f } /* Literal.String.Doc */ +html[data-theme="light"] .highlight .s2 { color: #00622f } /* Literal.String.Double */ +html[data-theme="light"] .highlight .se { color: #00622f } /* Literal.String.Escape */ +html[data-theme="light"] .highlight .sh { color: #00622f } /* Literal.String.Heredoc */ +html[data-theme="light"] .highlight .si { color: #00622f } /* Literal.String.Interpol */ +html[data-theme="light"] .highlight .sx { color: 
#00622f } /* Literal.String.Other */ +html[data-theme="light"] .highlight .sr { color: #a12236 } /* Literal.String.Regex */ +html[data-theme="light"] .highlight .s1 { color: #00622f } /* Literal.String.Single */ +html[data-theme="light"] .highlight .ss { color: #005b82 } /* Literal.String.Symbol */ +html[data-theme="light"] .highlight .bp { color: #7f4707 } /* Name.Builtin.Pseudo */ +html[data-theme="light"] .highlight .fm { color: #005b82 } /* Name.Function.Magic */ +html[data-theme="light"] .highlight .vc { color: #a12236 } /* Name.Variable.Class */ +html[data-theme="light"] .highlight .vg { color: #a12236 } /* Name.Variable.Global */ +html[data-theme="light"] .highlight .vi { color: #a12236 } /* Name.Variable.Instance */ +html[data-theme="light"] .highlight .vm { color: #7f4707 } /* Name.Variable.Magic */ +html[data-theme="light"] .highlight .il { color: #7f4707 } /* Literal.Number.Integer.Long */ +html[data-theme="dark"] .highlight pre { line-height: 125%; } +html[data-theme="dark"] .highlight td.linenos .normal { color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px; } +html[data-theme="dark"] .highlight span.linenos { color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px; } +html[data-theme="dark"] .highlight td.linenos .special { color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px; } +html[data-theme="dark"] .highlight span.linenos.special { color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px; } +html[data-theme="dark"] .highlight .hll { background-color: #ffd9002e } +html[data-theme="dark"] .highlight { background: #2b2b2b; color: #f8f8f2 } +html[data-theme="dark"] .highlight .c { color: #ffd900 } /* Comment */ +html[data-theme="dark"] .highlight .err { color: #ffa07a } /* Error */ +html[data-theme="dark"] .highlight .k { color: #dcc6e0 } /* Keyword */ +html[data-theme="dark"] .highlight .l { color: #ffd900 } /* Literal */ +html[data-theme="dark"] .highlight .n { color: #f8f8f2 } /* Name */ +html[data-theme="dark"] .highlight .o { color: #abe338 } /* Operator */ +html[data-theme="dark"] .highlight .p { color: #f8f8f2 } /* Punctuation */ +html[data-theme="dark"] .highlight .ch { color: #ffd900 } /* Comment.Hashbang */ +html[data-theme="dark"] .highlight .cm { color: #ffd900 } /* Comment.Multiline */ +html[data-theme="dark"] .highlight .cp { color: #ffd900 } /* Comment.Preproc */ +html[data-theme="dark"] .highlight .cpf { color: #ffd900 } /* Comment.PreprocFile */ +html[data-theme="dark"] .highlight .c1 { color: #ffd900 } /* Comment.Single */ +html[data-theme="dark"] .highlight .cs { color: #ffd900 } /* Comment.Special */ +html[data-theme="dark"] .highlight .gd { color: #00e0e0 } /* Generic.Deleted */ +html[data-theme="dark"] .highlight .ge { font-style: italic } /* Generic.Emph */ +html[data-theme="dark"] .highlight .gh { color: #00e0e0 } /* Generic.Heading */ +html[data-theme="dark"] .highlight .gs { font-weight: bold } /* Generic.Strong */ +html[data-theme="dark"] .highlight .gu { color: #00e0e0 } /* Generic.Subheading */ +html[data-theme="dark"] .highlight .kc { color: #dcc6e0 } /* Keyword.Constant */ +html[data-theme="dark"] .highlight .kd { color: #dcc6e0 } /* Keyword.Declaration */ +html[data-theme="dark"] .highlight .kn { color: #dcc6e0 } /* Keyword.Namespace */ +html[data-theme="dark"] .highlight .kp { color: #dcc6e0 } /* Keyword.Pseudo */ +html[data-theme="dark"] .highlight .kr { color: #dcc6e0 } /* Keyword.Reserved */ +html[data-theme="dark"] .highlight 
.kt { color: #ffd900 } /* Keyword.Type */ +html[data-theme="dark"] .highlight .ld { color: #ffd900 } /* Literal.Date */ +html[data-theme="dark"] .highlight .m { color: #ffd900 } /* Literal.Number */ +html[data-theme="dark"] .highlight .s { color: #abe338 } /* Literal.String */ +html[data-theme="dark"] .highlight .na { color: #ffd900 } /* Name.Attribute */ +html[data-theme="dark"] .highlight .nb { color: #ffd900 } /* Name.Builtin */ +html[data-theme="dark"] .highlight .nc { color: #00e0e0 } /* Name.Class */ +html[data-theme="dark"] .highlight .no { color: #00e0e0 } /* Name.Constant */ +html[data-theme="dark"] .highlight .nd { color: #ffd900 } /* Name.Decorator */ +html[data-theme="dark"] .highlight .ni { color: #abe338 } /* Name.Entity */ +html[data-theme="dark"] .highlight .ne { color: #dcc6e0 } /* Name.Exception */ +html[data-theme="dark"] .highlight .nf { color: #00e0e0 } /* Name.Function */ +html[data-theme="dark"] .highlight .nl { color: #ffd900 } /* Name.Label */ +html[data-theme="dark"] .highlight .nn { color: #f8f8f2 } /* Name.Namespace */ +html[data-theme="dark"] .highlight .nx { color: #f8f8f2 } /* Name.Other */ +html[data-theme="dark"] .highlight .py { color: #00e0e0 } /* Name.Property */ +html[data-theme="dark"] .highlight .nt { color: #00e0e0 } /* Name.Tag */ +html[data-theme="dark"] .highlight .nv { color: #ffa07a } /* Name.Variable */ +html[data-theme="dark"] .highlight .ow { color: #dcc6e0 } /* Operator.Word */ +html[data-theme="dark"] .highlight .pm { color: #f8f8f2 } /* Punctuation.Marker */ +html[data-theme="dark"] .highlight .w { color: #f8f8f2 } /* Text.Whitespace */ +html[data-theme="dark"] .highlight .mb { color: #ffd900 } /* Literal.Number.Bin */ +html[data-theme="dark"] .highlight .mf { color: #ffd900 } /* Literal.Number.Float */ +html[data-theme="dark"] .highlight .mh { color: #ffd900 } /* Literal.Number.Hex */ +html[data-theme="dark"] .highlight .mi { color: #ffd900 } /* Literal.Number.Integer */ +html[data-theme="dark"] .highlight .mo { color: #ffd900 } /* Literal.Number.Oct */ +html[data-theme="dark"] .highlight .sa { color: #abe338 } /* Literal.String.Affix */ +html[data-theme="dark"] .highlight .sb { color: #abe338 } /* Literal.String.Backtick */ +html[data-theme="dark"] .highlight .sc { color: #abe338 } /* Literal.String.Char */ +html[data-theme="dark"] .highlight .dl { color: #abe338 } /* Literal.String.Delimiter */ +html[data-theme="dark"] .highlight .sd { color: #abe338 } /* Literal.String.Doc */ +html[data-theme="dark"] .highlight .s2 { color: #abe338 } /* Literal.String.Double */ +html[data-theme="dark"] .highlight .se { color: #abe338 } /* Literal.String.Escape */ +html[data-theme="dark"] .highlight .sh { color: #abe338 } /* Literal.String.Heredoc */ +html[data-theme="dark"] .highlight .si { color: #abe338 } /* Literal.String.Interpol */ +html[data-theme="dark"] .highlight .sx { color: #abe338 } /* Literal.String.Other */ +html[data-theme="dark"] .highlight .sr { color: #ffa07a } /* Literal.String.Regex */ +html[data-theme="dark"] .highlight .s1 { color: #abe338 } /* Literal.String.Single */ +html[data-theme="dark"] .highlight .ss { color: #00e0e0 } /* Literal.String.Symbol */ +html[data-theme="dark"] .highlight .bp { color: #ffd900 } /* Name.Builtin.Pseudo */ +html[data-theme="dark"] .highlight .fm { color: #00e0e0 } /* Name.Function.Magic */ +html[data-theme="dark"] .highlight .vc { color: #ffa07a } /* Name.Variable.Class */ +html[data-theme="dark"] .highlight .vg { color: #ffa07a } /* Name.Variable.Global */ +html[data-theme="dark"] .highlight .vi { 
color: #ffa07a } /* Name.Variable.Instance */ +html[data-theme="dark"] .highlight .vm { color: #ffd900 } /* Name.Variable.Magic */ +html[data-theme="dark"] .highlight .il { color: #ffd900 } /* Literal.Number.Integer.Long */ \ No newline at end of file diff --git a/dev/docs/html/_static/sbt-webpack-macros.html b/dev/docs/html/_static/sbt-webpack-macros.html new file mode 100644 index 0000000..6cbf559 --- /dev/null +++ b/dev/docs/html/_static/sbt-webpack-macros.html @@ -0,0 +1,11 @@ + +{% macro head_pre_bootstrap() %} + +{% endmacro %} + +{% macro body_post() %} + +{% endmacro %} diff --git a/dev/docs/html/_static/scripts/bootstrap.js b/dev/docs/html/_static/scripts/bootstrap.js new file mode 100644 index 0000000..c8178de --- /dev/null +++ b/dev/docs/html/_static/scripts/bootstrap.js @@ -0,0 +1,3 @@ +/*! For license information please see bootstrap.js.LICENSE.txt */ +(()=>{"use strict";var t={d:(e,i)=>{for(var n in i)t.o(i,n)&&!t.o(e,n)&&Object.defineProperty(e,n,{enumerable:!0,get:i[n]})},o:(t,e)=>Object.prototype.hasOwnProperty.call(t,e),r:t=>{"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(t,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(t,"__esModule",{value:!0})}},e={};t.r(e),t.d(e,{afterMain:()=>E,afterRead:()=>v,afterWrite:()=>C,applyStyles:()=>$,arrow:()=>J,auto:()=>a,basePlacements:()=>l,beforeMain:()=>y,beforeRead:()=>_,beforeWrite:()=>A,bottom:()=>s,clippingParents:()=>d,computeStyles:()=>it,createPopper:()=>Dt,createPopperBase:()=>St,createPopperLite:()=>$t,detectOverflow:()=>_t,end:()=>h,eventListeners:()=>st,flip:()=>bt,hide:()=>wt,left:()=>r,main:()=>w,modifierPhases:()=>O,offset:()=>Et,placements:()=>g,popper:()=>f,popperGenerator:()=>Lt,popperOffsets:()=>At,preventOverflow:()=>Tt,read:()=>b,reference:()=>p,right:()=>o,start:()=>c,top:()=>n,variationPlacements:()=>m,viewport:()=>u,write:()=>T});var i={};t.r(i),t.d(i,{Alert:()=>Oe,Button:()=>ke,Carousel:()=>li,Collapse:()=>Ei,Dropdown:()=>Ki,Modal:()=>Ln,Offcanvas:()=>Kn,Popover:()=>bs,ScrollSpy:()=>Ls,Tab:()=>Js,Toast:()=>po,Tooltip:()=>fs});var n="top",s="bottom",o="right",r="left",a="auto",l=[n,s,o,r],c="start",h="end",d="clippingParents",u="viewport",f="popper",p="reference",m=l.reduce((function(t,e){return t.concat([e+"-"+c,e+"-"+h])}),[]),g=[].concat(l,[a]).reduce((function(t,e){return t.concat([e,e+"-"+c,e+"-"+h])}),[]),_="beforeRead",b="read",v="afterRead",y="beforeMain",w="main",E="afterMain",A="beforeWrite",T="write",C="afterWrite",O=[_,b,v,y,w,E,A,T,C];function x(t){return t?(t.nodeName||"").toLowerCase():null}function k(t){if(null==t)return window;if("[object Window]"!==t.toString()){var e=t.ownerDocument;return e&&e.defaultView||window}return t}function L(t){return t instanceof k(t).Element||t instanceof Element}function S(t){return t instanceof k(t).HTMLElement||t instanceof HTMLElement}function D(t){return"undefined"!=typeof ShadowRoot&&(t instanceof k(t).ShadowRoot||t instanceof ShadowRoot)}const $={name:"applyStyles",enabled:!0,phase:"write",fn:function(t){var e=t.state;Object.keys(e.elements).forEach((function(t){var i=e.styles[t]||{},n=e.attributes[t]||{},s=e.elements[t];S(s)&&x(s)&&(Object.assign(s.style,i),Object.keys(n).forEach((function(t){var e=n[t];!1===e?s.removeAttribute(t):s.setAttribute(t,!0===e?"":e)})))}))},effect:function(t){var e=t.state,i={popper:{position:e.options.strategy,left:"0",top:"0",margin:"0"},arrow:{position:"absolute"},reference:{}};return 
Object.assign(e.elements.popper.style,i.popper),e.styles=i,e.elements.arrow&&Object.assign(e.elements.arrow.style,i.arrow),function(){Object.keys(e.elements).forEach((function(t){var n=e.elements[t],s=e.attributes[t]||{},o=Object.keys(e.styles.hasOwnProperty(t)?e.styles[t]:i[t]).reduce((function(t,e){return t[e]="",t}),{});S(n)&&x(n)&&(Object.assign(n.style,o),Object.keys(s).forEach((function(t){n.removeAttribute(t)})))}))}},requires:["computeStyles"]};function I(t){return t.split("-")[0]}var N=Math.max,P=Math.min,M=Math.round;function j(){var t=navigator.userAgentData;return null!=t&&t.brands&&Array.isArray(t.brands)?t.brands.map((function(t){return t.brand+"/"+t.version})).join(" "):navigator.userAgent}function F(){return!/^((?!chrome|android).)*safari/i.test(j())}function H(t,e,i){void 0===e&&(e=!1),void 0===i&&(i=!1);var n=t.getBoundingClientRect(),s=1,o=1;e&&S(t)&&(s=t.offsetWidth>0&&M(n.width)/t.offsetWidth||1,o=t.offsetHeight>0&&M(n.height)/t.offsetHeight||1);var r=(L(t)?k(t):window).visualViewport,a=!F()&&i,l=(n.left+(a&&r?r.offsetLeft:0))/s,c=(n.top+(a&&r?r.offsetTop:0))/o,h=n.width/s,d=n.height/o;return{width:h,height:d,top:c,right:l+h,bottom:c+d,left:l,x:l,y:c}}function B(t){var e=H(t),i=t.offsetWidth,n=t.offsetHeight;return Math.abs(e.width-i)<=1&&(i=e.width),Math.abs(e.height-n)<=1&&(n=e.height),{x:t.offsetLeft,y:t.offsetTop,width:i,height:n}}function W(t,e){var i=e.getRootNode&&e.getRootNode();if(t.contains(e))return!0;if(i&&D(i)){var n=e;do{if(n&&t.isSameNode(n))return!0;n=n.parentNode||n.host}while(n)}return!1}function z(t){return k(t).getComputedStyle(t)}function R(t){return["table","td","th"].indexOf(x(t))>=0}function q(t){return((L(t)?t.ownerDocument:t.document)||window.document).documentElement}function V(t){return"html"===x(t)?t:t.assignedSlot||t.parentNode||(D(t)?t.host:null)||q(t)}function Y(t){return S(t)&&"fixed"!==z(t).position?t.offsetParent:null}function K(t){for(var e=k(t),i=Y(t);i&&R(i)&&"static"===z(i).position;)i=Y(i);return i&&("html"===x(i)||"body"===x(i)&&"static"===z(i).position)?e:i||function(t){var e=/firefox/i.test(j());if(/Trident/i.test(j())&&S(t)&&"fixed"===z(t).position)return null;var i=V(t);for(D(i)&&(i=i.host);S(i)&&["html","body"].indexOf(x(i))<0;){var n=z(i);if("none"!==n.transform||"none"!==n.perspective||"paint"===n.contain||-1!==["transform","perspective"].indexOf(n.willChange)||e&&"filter"===n.willChange||e&&n.filter&&"none"!==n.filter)return i;i=i.parentNode}return null}(t)||e}function Q(t){return["top","bottom"].indexOf(t)>=0?"x":"y"}function X(t,e,i){return N(t,P(e,i))}function U(t){return Object.assign({},{top:0,right:0,bottom:0,left:0},t)}function G(t,e){return e.reduce((function(e,i){return e[i]=t,e}),{})}const J={name:"arrow",enabled:!0,phase:"main",fn:function(t){var e,i=t.state,a=t.name,c=t.options,h=i.elements.arrow,d=i.modifiersData.popperOffsets,u=I(i.placement),f=Q(u),p=[r,o].indexOf(u)>=0?"height":"width";if(h&&d){var m=function(t,e){return U("number"!=typeof(t="function"==typeof t?t(Object.assign({},e.rects,{placement:e.placement})):t)?t:G(t,l))}(c.padding,i),g=B(h),_="y"===f?n:r,b="y"===f?s:o,v=i.rects.reference[p]+i.rects.reference[f]-d[f]-i.rects.popper[p],y=d[f]-i.rects.reference[f],w=K(h),E=w?"y"===f?w.clientHeight||0:w.clientWidth||0:0,A=v/2-y/2,T=m[_],C=E-g[p]-m[b],O=E/2-g[p]/2+A,x=X(T,O,C),k=f;i.modifiersData[a]=((e={})[k]=x,e.centerOffset=x-O,e)}},effect:function(t){var e=t.state,i=t.options.element,n=void 0===i?"[data-popper-arrow]":i;null!=n&&("string"!=typeof 
n||(n=e.elements.popper.querySelector(n)))&&W(e.elements.popper,n)&&(e.elements.arrow=n)},requires:["popperOffsets"],requiresIfExists:["preventOverflow"]};function Z(t){return t.split("-")[1]}var tt={top:"auto",right:"auto",bottom:"auto",left:"auto"};function et(t){var e,i=t.popper,a=t.popperRect,l=t.placement,c=t.variation,d=t.offsets,u=t.position,f=t.gpuAcceleration,p=t.adaptive,m=t.roundOffsets,g=t.isFixed,_=d.x,b=void 0===_?0:_,v=d.y,y=void 0===v?0:v,w="function"==typeof m?m({x:b,y}):{x:b,y};b=w.x,y=w.y;var E=d.hasOwnProperty("x"),A=d.hasOwnProperty("y"),T=r,C=n,O=window;if(p){var x=K(i),L="clientHeight",S="clientWidth";x===k(i)&&"static"!==z(x=q(i)).position&&"absolute"===u&&(L="scrollHeight",S="scrollWidth"),(l===n||(l===r||l===o)&&c===h)&&(C=s,y-=(g&&x===O&&O.visualViewport?O.visualViewport.height:x[L])-a.height,y*=f?1:-1),l!==r&&(l!==n&&l!==s||c!==h)||(T=o,b-=(g&&x===O&&O.visualViewport?O.visualViewport.width:x[S])-a.width,b*=f?1:-1)}var D,$=Object.assign({position:u},p&&tt),I=!0===m?function(t,e){var i=t.x,n=t.y,s=e.devicePixelRatio||1;return{x:M(i*s)/s||0,y:M(n*s)/s||0}}({x:b,y},k(i)):{x:b,y};return b=I.x,y=I.y,f?Object.assign({},$,((D={})[C]=A?"0":"",D[T]=E?"0":"",D.transform=(O.devicePixelRatio||1)<=1?"translate("+b+"px, "+y+"px)":"translate3d("+b+"px, "+y+"px, 0)",D)):Object.assign({},$,((e={})[C]=A?y+"px":"",e[T]=E?b+"px":"",e.transform="",e))}const it={name:"computeStyles",enabled:!0,phase:"beforeWrite",fn:function(t){var e=t.state,i=t.options,n=i.gpuAcceleration,s=void 0===n||n,o=i.adaptive,r=void 0===o||o,a=i.roundOffsets,l=void 0===a||a,c={placement:I(e.placement),variation:Z(e.placement),popper:e.elements.popper,popperRect:e.rects.popper,gpuAcceleration:s,isFixed:"fixed"===e.options.strategy};null!=e.modifiersData.popperOffsets&&(e.styles.popper=Object.assign({},e.styles.popper,et(Object.assign({},c,{offsets:e.modifiersData.popperOffsets,position:e.options.strategy,adaptive:r,roundOffsets:l})))),null!=e.modifiersData.arrow&&(e.styles.arrow=Object.assign({},e.styles.arrow,et(Object.assign({},c,{offsets:e.modifiersData.arrow,position:"absolute",adaptive:!1,roundOffsets:l})))),e.attributes.popper=Object.assign({},e.attributes.popper,{"data-popper-placement":e.placement})},data:{}};var nt={passive:!0};const st={name:"eventListeners",enabled:!0,phase:"write",fn:function(){},effect:function(t){var e=t.state,i=t.instance,n=t.options,s=n.scroll,o=void 0===s||s,r=n.resize,a=void 0===r||r,l=k(e.elements.popper),c=[].concat(e.scrollParents.reference,e.scrollParents.popper);return o&&c.forEach((function(t){t.addEventListener("scroll",i.update,nt)})),a&&l.addEventListener("resize",i.update,nt),function(){o&&c.forEach((function(t){t.removeEventListener("scroll",i.update,nt)})),a&&l.removeEventListener("resize",i.update,nt)}},data:{}};var ot={left:"right",right:"left",bottom:"top",top:"bottom"};function rt(t){return t.replace(/left|right|bottom|top/g,(function(t){return ot[t]}))}var at={start:"end",end:"start"};function lt(t){return t.replace(/start|end/g,(function(t){return at[t]}))}function ct(t){var e=k(t);return{scrollLeft:e.pageXOffset,scrollTop:e.pageYOffset}}function ht(t){return H(q(t)).left+ct(t).scrollLeft}function dt(t){var e=z(t),i=e.overflow,n=e.overflowX,s=e.overflowY;return/auto|scroll|overlay|hidden/.test(i+s+n)}function ut(t){return["html","body","#document"].indexOf(x(t))>=0?t.ownerDocument.body:S(t)&&dt(t)?t:ut(V(t))}function ft(t,e){var i;void 0===e&&(e=[]);var n=ut(t),s=n===(null==(i=t.ownerDocument)?void 
0:i.body),o=k(n),r=s?[o].concat(o.visualViewport||[],dt(n)?n:[]):n,a=e.concat(r);return s?a:a.concat(ft(V(r)))}function pt(t){return Object.assign({},t,{left:t.x,top:t.y,right:t.x+t.width,bottom:t.y+t.height})}function mt(t,e,i){return e===u?pt(function(t,e){var i=k(t),n=q(t),s=i.visualViewport,o=n.clientWidth,r=n.clientHeight,a=0,l=0;if(s){o=s.width,r=s.height;var c=F();(c||!c&&"fixed"===e)&&(a=s.offsetLeft,l=s.offsetTop)}return{width:o,height:r,x:a+ht(t),y:l}}(t,i)):L(e)?function(t,e){var i=H(t,!1,"fixed"===e);return i.top=i.top+t.clientTop,i.left=i.left+t.clientLeft,i.bottom=i.top+t.clientHeight,i.right=i.left+t.clientWidth,i.width=t.clientWidth,i.height=t.clientHeight,i.x=i.left,i.y=i.top,i}(e,i):pt(function(t){var e,i=q(t),n=ct(t),s=null==(e=t.ownerDocument)?void 0:e.body,o=N(i.scrollWidth,i.clientWidth,s?s.scrollWidth:0,s?s.clientWidth:0),r=N(i.scrollHeight,i.clientHeight,s?s.scrollHeight:0,s?s.clientHeight:0),a=-n.scrollLeft+ht(t),l=-n.scrollTop;return"rtl"===z(s||i).direction&&(a+=N(i.clientWidth,s?s.clientWidth:0)-o),{width:o,height:r,x:a,y:l}}(q(t)))}function gt(t){var e,i=t.reference,a=t.element,l=t.placement,d=l?I(l):null,u=l?Z(l):null,f=i.x+i.width/2-a.width/2,p=i.y+i.height/2-a.height/2;switch(d){case n:e={x:f,y:i.y-a.height};break;case s:e={x:f,y:i.y+i.height};break;case o:e={x:i.x+i.width,y:p};break;case r:e={x:i.x-a.width,y:p};break;default:e={x:i.x,y:i.y}}var m=d?Q(d):null;if(null!=m){var g="y"===m?"height":"width";switch(u){case c:e[m]=e[m]-(i[g]/2-a[g]/2);break;case h:e[m]=e[m]+(i[g]/2-a[g]/2)}}return e}function _t(t,e){void 0===e&&(e={});var i=e,r=i.placement,a=void 0===r?t.placement:r,c=i.strategy,h=void 0===c?t.strategy:c,m=i.boundary,g=void 0===m?d:m,_=i.rootBoundary,b=void 0===_?u:_,v=i.elementContext,y=void 0===v?f:v,w=i.altBoundary,E=void 0!==w&&w,A=i.padding,T=void 0===A?0:A,C=U("number"!=typeof T?T:G(T,l)),O=y===f?p:f,k=t.rects.popper,D=t.elements[E?O:y],$=function(t,e,i,n){var s="clippingParents"===e?function(t){var e=ft(V(t)),i=["absolute","fixed"].indexOf(z(t).position)>=0&&S(t)?K(t):t;return L(i)?e.filter((function(t){return L(t)&&W(t,i)&&"body"!==x(t)})):[]}(t):[].concat(e),o=[].concat(s,[i]),r=o[0],a=o.reduce((function(e,i){var s=mt(t,i,n);return e.top=N(s.top,e.top),e.right=P(s.right,e.right),e.bottom=P(s.bottom,e.bottom),e.left=N(s.left,e.left),e}),mt(t,r,n));return a.width=a.right-a.left,a.height=a.bottom-a.top,a.x=a.left,a.y=a.top,a}(L(D)?D:D.contextElement||q(t.elements.popper),g,b,h),I=H(t.elements.reference),M=gt({reference:I,element:k,strategy:"absolute",placement:a}),j=pt(Object.assign({},k,M)),F=y===f?j:I,B={top:$.top-F.top+C.top,bottom:F.bottom-$.bottom+C.bottom,left:$.left-F.left+C.left,right:F.right-$.right+C.right},R=t.modifiersData.offset;if(y===f&&R){var Y=R[a];Object.keys(B).forEach((function(t){var e=[o,s].indexOf(t)>=0?1:-1,i=[n,s].indexOf(t)>=0?"y":"x";B[t]+=Y[i]*e}))}return B}const bt={name:"flip",enabled:!0,phase:"main",fn:function(t){var e=t.state,i=t.options,h=t.name;if(!e.modifiersData[h]._skip){for(var d=i.mainAxis,u=void 0===d||d,f=i.altAxis,p=void 0===f||f,_=i.fallbackPlacements,b=i.padding,v=i.boundary,y=i.rootBoundary,w=i.altBoundary,E=i.flipVariations,A=void 0===E||E,T=i.allowedAutoPlacements,C=e.options.placement,O=I(C),x=_||(O!==C&&A?function(t){if(I(t)===a)return[];var e=rt(t);return[lt(t),e,lt(e)]}(C):[rt(C)]),k=[C].concat(x).reduce((function(t,i){return t.concat(I(i)===a?function(t,e){void 0===e&&(e={});var i=e,n=i.placement,s=i.boundary,o=i.rootBoundary,r=i.padding,a=i.flipVariations,c=i.allowedAutoPlacements,h=void 
0===c?g:c,d=Z(n),u=d?a?m:m.filter((function(t){return Z(t)===d})):l,f=u.filter((function(t){return h.indexOf(t)>=0}));0===f.length&&(f=u);var p=f.reduce((function(e,i){return e[i]=_t(t,{placement:i,boundary:s,rootBoundary:o,padding:r})[I(i)],e}),{});return Object.keys(p).sort((function(t,e){return p[t]-p[e]}))}(e,{placement:i,boundary:v,rootBoundary:y,padding:b,flipVariations:A,allowedAutoPlacements:T}):i)}),[]),L=e.rects.reference,S=e.rects.popper,D=new Map,$=!0,N=k[0],P=0;P=0,B=H?"width":"height",W=_t(e,{placement:M,boundary:v,rootBoundary:y,altBoundary:w,padding:b}),z=H?F?o:r:F?s:n;L[B]>S[B]&&(z=rt(z));var R=rt(z),q=[];if(u&&q.push(W[j]<=0),p&&q.push(W[z]<=0,W[R]<=0),q.every((function(t){return t}))){N=M,$=!1;break}D.set(M,q)}if($)for(var V=function(t){var e=k.find((function(e){var i=D.get(e);if(i)return i.slice(0,t).every((function(t){return t}))}));if(e)return N=e,"break"},Y=A?3:1;Y>0&&"break"!==V(Y);Y--);e.placement!==N&&(e.modifiersData[h]._skip=!0,e.placement=N,e.reset=!0)}},requiresIfExists:["offset"],data:{_skip:!1}};function vt(t,e,i){return void 0===i&&(i={x:0,y:0}),{top:t.top-e.height-i.y,right:t.right-e.width+i.x,bottom:t.bottom-e.height+i.y,left:t.left-e.width-i.x}}function yt(t){return[n,o,s,r].some((function(e){return t[e]>=0}))}const wt={name:"hide",enabled:!0,phase:"main",requiresIfExists:["preventOverflow"],fn:function(t){var e=t.state,i=t.name,n=e.rects.reference,s=e.rects.popper,o=e.modifiersData.preventOverflow,r=_t(e,{elementContext:"reference"}),a=_t(e,{altBoundary:!0}),l=vt(r,n),c=vt(a,s,o),h=yt(l),d=yt(c);e.modifiersData[i]={referenceClippingOffsets:l,popperEscapeOffsets:c,isReferenceHidden:h,hasPopperEscaped:d},e.attributes.popper=Object.assign({},e.attributes.popper,{"data-popper-reference-hidden":h,"data-popper-escaped":d})}},Et={name:"offset",enabled:!0,phase:"main",requires:["popperOffsets"],fn:function(t){var e=t.state,i=t.options,s=t.name,a=i.offset,l=void 0===a?[0,0]:a,c=g.reduce((function(t,i){return t[i]=function(t,e,i){var s=I(t),a=[r,n].indexOf(s)>=0?-1:1,l="function"==typeof i?i(Object.assign({},e,{placement:t})):i,c=l[0],h=l[1];return c=c||0,h=(h||0)*a,[r,o].indexOf(s)>=0?{x:h,y:c}:{x:c,y:h}}(i,e.rects,l),t}),{}),h=c[e.placement],d=h.x,u=h.y;null!=e.modifiersData.popperOffsets&&(e.modifiersData.popperOffsets.x+=d,e.modifiersData.popperOffsets.y+=u),e.modifiersData[s]=c}},At={name:"popperOffsets",enabled:!0,phase:"read",fn:function(t){var e=t.state,i=t.name;e.modifiersData[i]=gt({reference:e.rects.reference,element:e.rects.popper,strategy:"absolute",placement:e.placement})},data:{}},Tt={name:"preventOverflow",enabled:!0,phase:"main",fn:function(t){var e=t.state,i=t.options,a=t.name,l=i.mainAxis,h=void 0===l||l,d=i.altAxis,u=void 0!==d&&d,f=i.boundary,p=i.rootBoundary,m=i.altBoundary,g=i.padding,_=i.tether,b=void 0===_||_,v=i.tetherOffset,y=void 0===v?0:v,w=_t(e,{boundary:f,rootBoundary:p,padding:g,altBoundary:m}),E=I(e.placement),A=Z(e.placement),T=!A,C=Q(E),O="x"===C?"y":"x",x=e.modifiersData.popperOffsets,k=e.rects.reference,L=e.rects.popper,S="function"==typeof y?y(Object.assign({},e.rects,{placement:e.placement})):y,D="number"==typeof S?{mainAxis:S,altAxis:S}:Object.assign({mainAxis:0,altAxis:0},S),$=e.modifiersData.offset?e.modifiersData.offset[e.placement]:null,M={x:0,y:0};if(x){if(h){var 
j,F="y"===C?n:r,H="y"===C?s:o,W="y"===C?"height":"width",z=x[C],R=z+w[F],q=z-w[H],V=b?-L[W]/2:0,Y=A===c?k[W]:L[W],U=A===c?-L[W]:-k[W],G=e.elements.arrow,J=b&&G?B(G):{width:0,height:0},tt=e.modifiersData["arrow#persistent"]?e.modifiersData["arrow#persistent"].padding:{top:0,right:0,bottom:0,left:0},et=tt[F],it=tt[H],nt=X(0,k[W],J[W]),st=T?k[W]/2-V-nt-et-D.mainAxis:Y-nt-et-D.mainAxis,ot=T?-k[W]/2+V+nt+it+D.mainAxis:U+nt+it+D.mainAxis,rt=e.elements.arrow&&K(e.elements.arrow),at=rt?"y"===C?rt.clientTop||0:rt.clientLeft||0:0,lt=null!=(j=null==$?void 0:$[C])?j:0,ct=z+ot-lt,ht=X(b?P(R,z+st-lt-at):R,z,b?N(q,ct):q);x[C]=ht,M[C]=ht-z}if(u){var dt,ut="x"===C?n:r,ft="x"===C?s:o,pt=x[O],mt="y"===O?"height":"width",gt=pt+w[ut],bt=pt-w[ft],vt=-1!==[n,r].indexOf(E),yt=null!=(dt=null==$?void 0:$[O])?dt:0,wt=vt?gt:pt-k[mt]-L[mt]-yt+D.altAxis,Et=vt?pt+k[mt]+L[mt]-yt-D.altAxis:bt,At=b&&vt?function(t,e,i){var n=X(t,e,i);return n>i?i:n}(wt,pt,Et):X(b?wt:gt,pt,b?Et:bt);x[O]=At,M[O]=At-pt}e.modifiersData[a]=M}},requiresIfExists:["offset"]};function Ct(t,e,i){void 0===i&&(i=!1);var n,s,o=S(e),r=S(e)&&function(t){var e=t.getBoundingClientRect(),i=M(e.width)/t.offsetWidth||1,n=M(e.height)/t.offsetHeight||1;return 1!==i||1!==n}(e),a=q(e),l=H(t,r,i),c={scrollLeft:0,scrollTop:0},h={x:0,y:0};return(o||!o&&!i)&&(("body"!==x(e)||dt(a))&&(c=(n=e)!==k(n)&&S(n)?{scrollLeft:(s=n).scrollLeft,scrollTop:s.scrollTop}:ct(n)),S(e)?((h=H(e,!0)).x+=e.clientLeft,h.y+=e.clientTop):a&&(h.x=ht(a))),{x:l.left+c.scrollLeft-h.x,y:l.top+c.scrollTop-h.y,width:l.width,height:l.height}}function Ot(t){var e=new Map,i=new Set,n=[];function s(t){i.add(t.name),[].concat(t.requires||[],t.requiresIfExists||[]).forEach((function(t){if(!i.has(t)){var n=e.get(t);n&&s(n)}})),n.push(t)}return t.forEach((function(t){e.set(t.name,t)})),t.forEach((function(t){i.has(t.name)||s(t)})),n}var xt={placement:"bottom",modifiers:[],strategy:"absolute"};function kt(){for(var t=arguments.length,e=new Array(t),i=0;iIt.has(t)&&It.get(t).get(e)||null,remove(t,e){if(!It.has(t))return;const i=It.get(t);i.delete(e),0===i.size&&It.delete(t)}},Pt="transitionend",Mt=t=>(t&&window.CSS&&window.CSS.escape&&(t=t.replace(/#([^\s"#']+)/g,((t,e)=>`#${CSS.escape(e)}`))),t),jt=t=>{t.dispatchEvent(new Event(Pt))},Ft=t=>!(!t||"object"!=typeof t)&&(void 0!==t.jquery&&(t=t[0]),void 0!==t.nodeType),Ht=t=>Ft(t)?t.jquery?t[0]:t:"string"==typeof t&&t.length>0?document.querySelector(Mt(t)):null,Bt=t=>{if(!Ft(t)||0===t.getClientRects().length)return!1;const e="visible"===getComputedStyle(t).getPropertyValue("visibility"),i=t.closest("details:not([open])");if(!i)return e;if(i!==t){const e=t.closest("summary");if(e&&e.parentNode!==i)return!1;if(null===e)return!1}return e},Wt=t=>!t||t.nodeType!==Node.ELEMENT_NODE||!!t.classList.contains("disabled")||(void 0!==t.disabled?t.disabled:t.hasAttribute("disabled")&&"false"!==t.getAttribute("disabled")),zt=t=>{if(!document.documentElement.attachShadow)return null;if("function"==typeof t.getRootNode){const e=t.getRootNode();return e instanceof ShadowRoot?e:null}return t instanceof ShadowRoot?t:t.parentNode?zt(t.parentNode):null},Rt=()=>{},qt=t=>{t.offsetHeight},Vt=()=>window.jQuery&&!document.body.hasAttribute("data-bs-no-jquery")?window.jQuery:null,Yt=[],Kt=()=>"rtl"===document.documentElement.dir,Qt=t=>{var e;e=()=>{const e=Vt();if(e){const 
i=t.NAME,n=e.fn[i];e.fn[i]=t.jQueryInterface,e.fn[i].Constructor=t,e.fn[i].noConflict=()=>(e.fn[i]=n,t.jQueryInterface)}},"loading"===document.readyState?(Yt.length||document.addEventListener("DOMContentLoaded",(()=>{for(const t of Yt)t()})),Yt.push(e)):e()},Xt=(t,e=[],i=t)=>"function"==typeof t?t(...e):i,Ut=(t,e,i=!0)=>{if(!i)return void Xt(t);const n=(t=>{if(!t)return 0;let{transitionDuration:e,transitionDelay:i}=window.getComputedStyle(t);const n=Number.parseFloat(e),s=Number.parseFloat(i);return n||s?(e=e.split(",")[0],i=i.split(",")[0],1e3*(Number.parseFloat(e)+Number.parseFloat(i))):0})(e)+5;let s=!1;const o=({target:i})=>{i===e&&(s=!0,e.removeEventListener(Pt,o),Xt(t))};e.addEventListener(Pt,o),setTimeout((()=>{s||jt(e)}),n)},Gt=(t,e,i,n)=>{const s=t.length;let o=t.indexOf(e);return-1===o?!i&&n?t[s-1]:t[0]:(o+=i?1:-1,n&&(o=(o+s)%s),t[Math.max(0,Math.min(o,s-1))])},Jt=/[^.]*(?=\..*)\.|.*/,Zt=/\..*/,te=/::\d+$/,ee={};let ie=1;const ne={mouseenter:"mouseover",mouseleave:"mouseout"},se=new Set(["click","dblclick","mouseup","mousedown","contextmenu","mousewheel","DOMMouseScroll","mouseover","mouseout","mousemove","selectstart","selectend","keydown","keypress","keyup","orientationchange","touchstart","touchmove","touchend","touchcancel","pointerdown","pointermove","pointerup","pointerleave","pointercancel","gesturestart","gesturechange","gestureend","focus","blur","change","reset","select","submit","focusin","focusout","load","unload","beforeunload","resize","move","DOMContentLoaded","readystatechange","error","abort","scroll"]);function oe(t,e){return e&&`${e}::${ie++}`||t.uidEvent||ie++}function re(t){const e=oe(t);return t.uidEvent=e,ee[e]=ee[e]||{},ee[e]}function ae(t,e,i=null){return Object.values(t).find((t=>t.callable===e&&t.delegationSelector===i))}function le(t,e,i){const n="string"==typeof e,s=n?i:e||i;let o=ue(t);return se.has(o)||(o=t),[n,s,o]}function ce(t,e,i,n,s){if("string"!=typeof e||!t)return;let[o,r,a]=le(e,i,n);if(e in ne){const t=t=>function(e){if(!e.relatedTarget||e.relatedTarget!==e.delegateTarget&&!e.delegateTarget.contains(e.relatedTarget))return t.call(this,e)};r=t(r)}const l=re(t),c=l[a]||(l[a]={}),h=ae(c,r,o?i:null);if(h)return void(h.oneOff=h.oneOff&&s);const d=oe(r,e.replace(Jt,"")),u=o?function(t,e,i){return function n(s){const o=t.querySelectorAll(e);for(let{target:r}=s;r&&r!==this;r=r.parentNode)for(const a of o)if(a===r)return pe(s,{delegateTarget:r}),n.oneOff&&fe.off(t,s.type,e,i),i.apply(r,[s])}}(t,i,r):function(t,e){return function i(n){return pe(n,{delegateTarget:t}),i.oneOff&&fe.off(t,n.type,e),e.apply(t,[n])}}(t,r);u.delegationSelector=o?i:null,u.callable=r,u.oneOff=s,u.uidEvent=d,c[d]=u,t.addEventListener(a,u,o)}function he(t,e,i,n,s){const o=ae(e[i],n,s);o&&(t.removeEventListener(i,o,Boolean(s)),delete e[i][o.uidEvent])}function de(t,e,i,n){const s=e[i]||{};for(const[o,r]of Object.entries(s))o.includes(n)&&he(t,e,i,r.callable,r.delegationSelector)}function ue(t){return t=t.replace(Zt,""),ne[t]||t}const fe={on(t,e,i,n){ce(t,e,i,n,!1)},one(t,e,i,n){ce(t,e,i,n,!0)},off(t,e,i,n){if("string"!=typeof e||!t)return;const[s,o,r]=le(e,i,n),a=r!==e,l=re(t),c=l[r]||{},h=e.startsWith(".");if(void 0===o){if(h)for(const i of Object.keys(l))de(t,l,i,e.slice(1));for(const[i,n]of Object.entries(c)){const s=i.replace(te,"");a&&!e.includes(s)||he(t,l,r,n.callable,n.delegationSelector)}}else{if(!Object.keys(c).length)return;he(t,l,r,o,s?i:null)}},trigger(t,e,i){if("string"!=typeof e||!t)return null;const n=Vt();let 
s=null,o=!0,r=!0,a=!1;e!==ue(e)&&n&&(s=n.Event(e,i),n(t).trigger(s),o=!s.isPropagationStopped(),r=!s.isImmediatePropagationStopped(),a=s.isDefaultPrevented());const l=pe(new Event(e,{bubbles:o,cancelable:!0}),i);return a&&l.preventDefault(),r&&t.dispatchEvent(l),l.defaultPrevented&&s&&s.preventDefault(),l}};function pe(t,e={}){for(const[i,n]of Object.entries(e))try{t[i]=n}catch(e){Object.defineProperty(t,i,{configurable:!0,get:()=>n})}return t}function me(t){if("true"===t)return!0;if("false"===t)return!1;if(t===Number(t).toString())return Number(t);if(""===t||"null"===t)return null;if("string"!=typeof t)return t;try{return JSON.parse(decodeURIComponent(t))}catch(e){return t}}function ge(t){return t.replace(/[A-Z]/g,(t=>`-${t.toLowerCase()}`))}const _e={setDataAttribute(t,e,i){t.setAttribute(`data-bs-${ge(e)}`,i)},removeDataAttribute(t,e){t.removeAttribute(`data-bs-${ge(e)}`)},getDataAttributes(t){if(!t)return{};const e={},i=Object.keys(t.dataset).filter((t=>t.startsWith("bs")&&!t.startsWith("bsConfig")));for(const n of i){let i=n.replace(/^bs/,"");i=i.charAt(0).toLowerCase()+i.slice(1,i.length),e[i]=me(t.dataset[n])}return e},getDataAttribute:(t,e)=>me(t.getAttribute(`data-bs-${ge(e)}`))};class be{static get Default(){return{}}static get DefaultType(){return{}}static get NAME(){throw new Error('You have to implement the static method "NAME", for each component!')}_getConfig(t){return t=this._mergeConfigObj(t),t=this._configAfterMerge(t),this._typeCheckConfig(t),t}_configAfterMerge(t){return t}_mergeConfigObj(t,e){const i=Ft(e)?_e.getDataAttribute(e,"config"):{};return{...this.constructor.Default,..."object"==typeof i?i:{},...Ft(e)?_e.getDataAttributes(e):{},..."object"==typeof t?t:{}}}_typeCheckConfig(t,e=this.constructor.DefaultType){for(const[n,s]of Object.entries(e)){const e=t[n],o=Ft(e)?"element":null==(i=e)?`${i}`:Object.prototype.toString.call(i).match(/\s([a-z]+)/i)[1].toLowerCase();if(!new RegExp(s).test(o))throw new TypeError(`${this.constructor.NAME.toUpperCase()}: Option "${n}" provided type "${o}" but expected type "${s}".`)}var i}}class ve extends be{constructor(t,e){super(),(t=Ht(t))&&(this._element=t,this._config=this._getConfig(e),Nt.set(this._element,this.constructor.DATA_KEY,this))}dispose(){Nt.remove(this._element,this.constructor.DATA_KEY),fe.off(this._element,this.constructor.EVENT_KEY);for(const t of Object.getOwnPropertyNames(this))this[t]=null}_queueCallback(t,e,i=!0){Ut(t,e,i)}_getConfig(t){return t=this._mergeConfigObj(t,this._element),t=this._configAfterMerge(t),this._typeCheckConfig(t),t}static getInstance(t){return Nt.get(Ht(t),this.DATA_KEY)}static getOrCreateInstance(t,e={}){return this.getInstance(t)||new this(t,"object"==typeof e?e:null)}static get VERSION(){return"5.3.3"}static get DATA_KEY(){return`bs.${this.NAME}`}static get EVENT_KEY(){return`.${this.DATA_KEY}`}static eventName(t){return`${t}${this.EVENT_KEY}`}}const ye=t=>{let e=t.getAttribute("data-bs-target");if(!e||"#"===e){let i=t.getAttribute("href");if(!i||!i.includes("#")&&!i.startsWith("."))return null;i.includes("#")&&!i.startsWith("#")&&(i=`#${i.split("#")[1]}`),e=i&&"#"!==i?i.trim():null}return e?e.split(",").map((t=>Mt(t))).join(","):null},we={find:(t,e=document.documentElement)=>[].concat(...Element.prototype.querySelectorAll.call(e,t)),findOne:(t,e=document.documentElement)=>Element.prototype.querySelector.call(e,t),children:(t,e)=>[].concat(...t.children).filter((t=>t.matches(e))),parents(t,e){const i=[];let n=t.parentNode.closest(e);for(;n;)i.push(n),n=n.parentNode.closest(e);return 
i},prev(t,e){let i=t.previousElementSibling;for(;i;){if(i.matches(e))return[i];i=i.previousElementSibling}return[]},next(t,e){let i=t.nextElementSibling;for(;i;){if(i.matches(e))return[i];i=i.nextElementSibling}return[]},focusableChildren(t){const e=["a","button","input","textarea","select","details","[tabindex]",'[contenteditable="true"]'].map((t=>`${t}:not([tabindex^="-"])`)).join(",");return this.find(e,t).filter((t=>!Wt(t)&&Bt(t)))},getSelectorFromElement(t){const e=ye(t);return e&&we.findOne(e)?e:null},getElementFromSelector(t){const e=ye(t);return e?we.findOne(e):null},getMultipleElementsFromSelector(t){const e=ye(t);return e?we.find(e):[]}},Ee=(t,e="hide")=>{const i=`click.dismiss${t.EVENT_KEY}`,n=t.NAME;fe.on(document,i,`[data-bs-dismiss="${n}"]`,(function(i){if(["A","AREA"].includes(this.tagName)&&i.preventDefault(),Wt(this))return;const s=we.getElementFromSelector(this)||this.closest(`.${n}`);t.getOrCreateInstance(s)[e]()}))},Ae=".bs.alert",Te=`close${Ae}`,Ce=`closed${Ae}`;class Oe extends ve{static get NAME(){return"alert"}close(){if(fe.trigger(this._element,Te).defaultPrevented)return;this._element.classList.remove("show");const t=this._element.classList.contains("fade");this._queueCallback((()=>this._destroyElement()),this._element,t)}_destroyElement(){this._element.remove(),fe.trigger(this._element,Ce),this.dispose()}static jQueryInterface(t){return this.each((function(){const e=Oe.getOrCreateInstance(this);if("string"==typeof t){if(void 0===e[t]||t.startsWith("_")||"constructor"===t)throw new TypeError(`No method named "${t}"`);e[t](this)}}))}}Ee(Oe,"close"),Qt(Oe);const xe='[data-bs-toggle="button"]';class ke extends ve{static get NAME(){return"button"}toggle(){this._element.setAttribute("aria-pressed",this._element.classList.toggle("active"))}static jQueryInterface(t){return this.each((function(){const e=ke.getOrCreateInstance(this);"toggle"===t&&e[t]()}))}}fe.on(document,"click.bs.button.data-api",xe,(t=>{t.preventDefault();const e=t.target.closest(xe);ke.getOrCreateInstance(e).toggle()})),Qt(ke);const Le=".bs.swipe",Se=`touchstart${Le}`,De=`touchmove${Le}`,$e=`touchend${Le}`,Ie=`pointerdown${Le}`,Ne=`pointerup${Le}`,Pe={endCallback:null,leftCallback:null,rightCallback:null},Me={endCallback:"(function|null)",leftCallback:"(function|null)",rightCallback:"(function|null)"};class je extends be{constructor(t,e){super(),this._element=t,t&&je.isSupported()&&(this._config=this._getConfig(e),this._deltaX=0,this._supportPointerEvents=Boolean(window.PointerEvent),this._initEvents())}static get Default(){return Pe}static get DefaultType(){return Me}static get NAME(){return"swipe"}dispose(){fe.off(this._element,Le)}_start(t){this._supportPointerEvents?this._eventIsPointerPenTouch(t)&&(this._deltaX=t.clientX):this._deltaX=t.touches[0].clientX}_end(t){this._eventIsPointerPenTouch(t)&&(this._deltaX=t.clientX-this._deltaX),this._handleSwipe(),Xt(this._config.endCallback)}_move(t){this._deltaX=t.touches&&t.touches.length>1?0:t.touches[0].clientX-this._deltaX}_handleSwipe(){const t=Math.abs(this._deltaX);if(t<=40)return;const e=t/this._deltaX;this._deltaX=0,e&&Xt(e>0?this._config.rightCallback:this._config.leftCallback)}_initEvents(){this._supportPointerEvents?(fe.on(this._element,Ie,(t=>this._start(t))),fe.on(this._element,Ne,(t=>this._end(t))),this._element.classList.add("pointer-event")):(fe.on(this._element,Se,(t=>this._start(t))),fe.on(this._element,De,(t=>this._move(t))),fe.on(this._element,$e,(t=>this._end(t))))}_eventIsPointerPenTouch(t){return 
this._supportPointerEvents&&("pen"===t.pointerType||"touch"===t.pointerType)}static isSupported(){return"ontouchstart"in document.documentElement||navigator.maxTouchPoints>0}}const Fe=".bs.carousel",He=".data-api",Be="ArrowLeft",We="ArrowRight",ze="next",Re="prev",qe="left",Ve="right",Ye=`slide${Fe}`,Ke=`slid${Fe}`,Qe=`keydown${Fe}`,Xe=`mouseenter${Fe}`,Ue=`mouseleave${Fe}`,Ge=`dragstart${Fe}`,Je=`load${Fe}${He}`,Ze=`click${Fe}${He}`,ti="carousel",ei="active",ii=".active",ni=".carousel-item",si=ii+ni,oi={[Be]:Ve,[We]:qe},ri={interval:5e3,keyboard:!0,pause:"hover",ride:!1,touch:!0,wrap:!0},ai={interval:"(number|boolean)",keyboard:"boolean",pause:"(string|boolean)",ride:"(boolean|string)",touch:"boolean",wrap:"boolean"};class li extends ve{constructor(t,e){super(t,e),this._interval=null,this._activeElement=null,this._isSliding=!1,this.touchTimeout=null,this._swipeHelper=null,this._indicatorsElement=we.findOne(".carousel-indicators",this._element),this._addEventListeners(),this._config.ride===ti&&this.cycle()}static get Default(){return ri}static get DefaultType(){return ai}static get NAME(){return"carousel"}next(){this._slide(ze)}nextWhenVisible(){!document.hidden&&Bt(this._element)&&this.next()}prev(){this._slide(Re)}pause(){this._isSliding&&jt(this._element),this._clearInterval()}cycle(){this._clearInterval(),this._updateInterval(),this._interval=setInterval((()=>this.nextWhenVisible()),this._config.interval)}_maybeEnableCycle(){this._config.ride&&(this._isSliding?fe.one(this._element,Ke,(()=>this.cycle())):this.cycle())}to(t){const e=this._getItems();if(t>e.length-1||t<0)return;if(this._isSliding)return void fe.one(this._element,Ke,(()=>this.to(t)));const i=this._getItemIndex(this._getActive());if(i===t)return;const n=t>i?ze:Re;this._slide(n,e[t])}dispose(){this._swipeHelper&&this._swipeHelper.dispose(),super.dispose()}_configAfterMerge(t){return t.defaultInterval=t.interval,t}_addEventListeners(){this._config.keyboard&&fe.on(this._element,Qe,(t=>this._keydown(t))),"hover"===this._config.pause&&(fe.on(this._element,Xe,(()=>this.pause())),fe.on(this._element,Ue,(()=>this._maybeEnableCycle()))),this._config.touch&&je.isSupported()&&this._addTouchEventListeners()}_addTouchEventListeners(){for(const t of we.find(".carousel-item img",this._element))fe.on(t,Ge,(t=>t.preventDefault()));const t={leftCallback:()=>this._slide(this._directionToOrder(qe)),rightCallback:()=>this._slide(this._directionToOrder(Ve)),endCallback:()=>{"hover"===this._config.pause&&(this.pause(),this.touchTimeout&&clearTimeout(this.touchTimeout),this.touchTimeout=setTimeout((()=>this._maybeEnableCycle()),500+this._config.interval))}};this._swipeHelper=new je(this._element,t)}_keydown(t){if(/input|textarea/i.test(t.target.tagName))return;const e=oi[t.key];e&&(t.preventDefault(),this._slide(this._directionToOrder(e)))}_getItemIndex(t){return this._getItems().indexOf(t)}_setActiveIndicatorElement(t){if(!this._indicatorsElement)return;const e=we.findOne(ii,this._indicatorsElement);e.classList.remove(ei),e.removeAttribute("aria-current");const i=we.findOne(`[data-bs-slide-to="${t}"]`,this._indicatorsElement);i&&(i.classList.add(ei),i.setAttribute("aria-current","true"))}_updateInterval(){const t=this._activeElement||this._getActive();if(!t)return;const e=Number.parseInt(t.getAttribute("data-bs-interval"),10);this._config.interval=e||this._config.defaultInterval}_slide(t,e=null){if(this._isSliding)return;const i=this._getActive(),n=t===ze,s=e||Gt(this._getItems(),i,n,this._config.wrap);if(s===i)return;const 
o=this._getItemIndex(s),r=e=>fe.trigger(this._element,e,{relatedTarget:s,direction:this._orderToDirection(t),from:this._getItemIndex(i),to:o});if(r(Ye).defaultPrevented)return;if(!i||!s)return;const a=Boolean(this._interval);this.pause(),this._isSliding=!0,this._setActiveIndicatorElement(o),this._activeElement=s;const l=n?"carousel-item-start":"carousel-item-end",c=n?"carousel-item-next":"carousel-item-prev";s.classList.add(c),qt(s),i.classList.add(l),s.classList.add(l),this._queueCallback((()=>{s.classList.remove(l,c),s.classList.add(ei),i.classList.remove(ei,c,l),this._isSliding=!1,r(Ke)}),i,this._isAnimated()),a&&this.cycle()}_isAnimated(){return this._element.classList.contains("slide")}_getActive(){return we.findOne(si,this._element)}_getItems(){return we.find(ni,this._element)}_clearInterval(){this._interval&&(clearInterval(this._interval),this._interval=null)}_directionToOrder(t){return Kt()?t===qe?Re:ze:t===qe?ze:Re}_orderToDirection(t){return Kt()?t===Re?qe:Ve:t===Re?Ve:qe}static jQueryInterface(t){return this.each((function(){const e=li.getOrCreateInstance(this,t);if("number"!=typeof t){if("string"==typeof t){if(void 0===e[t]||t.startsWith("_")||"constructor"===t)throw new TypeError(`No method named "${t}"`);e[t]()}}else e.to(t)}))}}fe.on(document,Ze,"[data-bs-slide], [data-bs-slide-to]",(function(t){const e=we.getElementFromSelector(this);if(!e||!e.classList.contains(ti))return;t.preventDefault();const i=li.getOrCreateInstance(e),n=this.getAttribute("data-bs-slide-to");return n?(i.to(n),void i._maybeEnableCycle()):"next"===_e.getDataAttribute(this,"slide")?(i.next(),void i._maybeEnableCycle()):(i.prev(),void i._maybeEnableCycle())})),fe.on(window,Je,(()=>{const t=we.find('[data-bs-ride="carousel"]');for(const e of t)li.getOrCreateInstance(e)})),Qt(li);const ci=".bs.collapse",hi=`show${ci}`,di=`shown${ci}`,ui=`hide${ci}`,fi=`hidden${ci}`,pi=`click${ci}.data-api`,mi="show",gi="collapse",_i="collapsing",bi=`:scope .${gi} .${gi}`,vi='[data-bs-toggle="collapse"]',yi={parent:null,toggle:!0},wi={parent:"(null|element)",toggle:"boolean"};class Ei extends ve{constructor(t,e){super(t,e),this._isTransitioning=!1,this._triggerArray=[];const i=we.find(vi);for(const t of i){const e=we.getSelectorFromElement(t),i=we.find(e).filter((t=>t===this._element));null!==e&&i.length&&this._triggerArray.push(t)}this._initializeChildren(),this._config.parent||this._addAriaAndCollapsedClass(this._triggerArray,this._isShown()),this._config.toggle&&this.toggle()}static get Default(){return yi}static get DefaultType(){return wi}static get NAME(){return"collapse"}toggle(){this._isShown()?this.hide():this.show()}show(){if(this._isTransitioning||this._isShown())return;let t=[];if(this._config.parent&&(t=this._getFirstLevelChildren(".collapse.show, .collapse.collapsing").filter((t=>t!==this._element)).map((t=>Ei.getOrCreateInstance(t,{toggle:!1})))),t.length&&t[0]._isTransitioning)return;if(fe.trigger(this._element,hi).defaultPrevented)return;for(const e of t)e.hide();const e=this._getDimension();this._element.classList.remove(gi),this._element.classList.add(_i),this._element.style[e]=0,this._addAriaAndCollapsedClass(this._triggerArray,!0),this._isTransitioning=!0;const 
i=`scroll${e[0].toUpperCase()+e.slice(1)}`;this._queueCallback((()=>{this._isTransitioning=!1,this._element.classList.remove(_i),this._element.classList.add(gi,mi),this._element.style[e]="",fe.trigger(this._element,di)}),this._element,!0),this._element.style[e]=`${this._element[i]}px`}hide(){if(this._isTransitioning||!this._isShown())return;if(fe.trigger(this._element,ui).defaultPrevented)return;const t=this._getDimension();this._element.style[t]=`${this._element.getBoundingClientRect()[t]}px`,qt(this._element),this._element.classList.add(_i),this._element.classList.remove(gi,mi);for(const t of this._triggerArray){const e=we.getElementFromSelector(t);e&&!this._isShown(e)&&this._addAriaAndCollapsedClass([t],!1)}this._isTransitioning=!0,this._element.style[t]="",this._queueCallback((()=>{this._isTransitioning=!1,this._element.classList.remove(_i),this._element.classList.add(gi),fe.trigger(this._element,fi)}),this._element,!0)}_isShown(t=this._element){return t.classList.contains(mi)}_configAfterMerge(t){return t.toggle=Boolean(t.toggle),t.parent=Ht(t.parent),t}_getDimension(){return this._element.classList.contains("collapse-horizontal")?"width":"height"}_initializeChildren(){if(!this._config.parent)return;const t=this._getFirstLevelChildren(vi);for(const e of t){const t=we.getElementFromSelector(e);t&&this._addAriaAndCollapsedClass([e],this._isShown(t))}}_getFirstLevelChildren(t){const e=we.find(bi,this._config.parent);return we.find(t,this._config.parent).filter((t=>!e.includes(t)))}_addAriaAndCollapsedClass(t,e){if(t.length)for(const i of t)i.classList.toggle("collapsed",!e),i.setAttribute("aria-expanded",e)}static jQueryInterface(t){const e={};return"string"==typeof t&&/show|hide/.test(t)&&(e.toggle=!1),this.each((function(){const i=Ei.getOrCreateInstance(this,e);if("string"==typeof t){if(void 0===i[t])throw new TypeError(`No method named "${t}"`);i[t]()}}))}}fe.on(document,pi,vi,(function(t){("A"===t.target.tagName||t.delegateTarget&&"A"===t.delegateTarget.tagName)&&t.preventDefault();for(const t of we.getMultipleElementsFromSelector(this))Ei.getOrCreateInstance(t,{toggle:!1}).toggle()})),Qt(Ei);const Ai="dropdown",Ti=".bs.dropdown",Ci=".data-api",Oi="ArrowUp",xi="ArrowDown",ki=`hide${Ti}`,Li=`hidden${Ti}`,Si=`show${Ti}`,Di=`shown${Ti}`,$i=`click${Ti}${Ci}`,Ii=`keydown${Ti}${Ci}`,Ni=`keyup${Ti}${Ci}`,Pi="show",Mi='[data-bs-toggle="dropdown"]:not(.disabled):not(:disabled)',ji=`${Mi}.${Pi}`,Fi=".dropdown-menu",Hi=Kt()?"top-end":"top-start",Bi=Kt()?"top-start":"top-end",Wi=Kt()?"bottom-end":"bottom-start",zi=Kt()?"bottom-start":"bottom-end",Ri=Kt()?"left-start":"right-start",qi=Kt()?"right-start":"left-start",Vi={autoClose:!0,boundary:"clippingParents",display:"dynamic",offset:[0,2],popperConfig:null,reference:"toggle"},Yi={autoClose:"(boolean|string)",boundary:"(string|element)",display:"string",offset:"(array|string|function)",popperConfig:"(null|object|function)",reference:"(string|element|object)"};class Ki extends ve{constructor(t,e){super(t,e),this._popper=null,this._parent=this._element.parentNode,this._menu=we.next(this._element,Fi)[0]||we.prev(this._element,Fi)[0]||we.findOne(Fi,this._parent),this._inNavbar=this._detectNavbar()}static get Default(){return Vi}static get DefaultType(){return Yi}static get NAME(){return Ai}toggle(){return this._isShown()?this.hide():this.show()}show(){if(Wt(this._element)||this._isShown())return;const t={relatedTarget:this._element};if(!fe.trigger(this._element,Si,t).defaultPrevented){if(this._createPopper(),"ontouchstart"in 
document.documentElement&&!this._parent.closest(".navbar-nav"))for(const t of[].concat(...document.body.children))fe.on(t,"mouseover",Rt);this._element.focus(),this._element.setAttribute("aria-expanded",!0),this._menu.classList.add(Pi),this._element.classList.add(Pi),fe.trigger(this._element,Di,t)}}hide(){if(Wt(this._element)||!this._isShown())return;const t={relatedTarget:this._element};this._completeHide(t)}dispose(){this._popper&&this._popper.destroy(),super.dispose()}update(){this._inNavbar=this._detectNavbar(),this._popper&&this._popper.update()}_completeHide(t){if(!fe.trigger(this._element,ki,t).defaultPrevented){if("ontouchstart"in document.documentElement)for(const t of[].concat(...document.body.children))fe.off(t,"mouseover",Rt);this._popper&&this._popper.destroy(),this._menu.classList.remove(Pi),this._element.classList.remove(Pi),this._element.setAttribute("aria-expanded","false"),_e.removeDataAttribute(this._menu,"popper"),fe.trigger(this._element,Li,t)}}_getConfig(t){if("object"==typeof(t=super._getConfig(t)).reference&&!Ft(t.reference)&&"function"!=typeof t.reference.getBoundingClientRect)throw new TypeError(`${Ai.toUpperCase()}: Option "reference" provided type "object" without a required "getBoundingClientRect" method.`);return t}_createPopper(){if(void 0===e)throw new TypeError("Bootstrap's dropdowns require Popper (https://popper.js.org)");let t=this._element;"parent"===this._config.reference?t=this._parent:Ft(this._config.reference)?t=Ht(this._config.reference):"object"==typeof this._config.reference&&(t=this._config.reference);const i=this._getPopperConfig();this._popper=Dt(t,this._menu,i)}_isShown(){return this._menu.classList.contains(Pi)}_getPlacement(){const t=this._parent;if(t.classList.contains("dropend"))return Ri;if(t.classList.contains("dropstart"))return qi;if(t.classList.contains("dropup-center"))return"top";if(t.classList.contains("dropdown-center"))return"bottom";const e="end"===getComputedStyle(this._menu).getPropertyValue("--bs-position").trim();return t.classList.contains("dropup")?e?Bi:Hi:e?zi:Wi}_detectNavbar(){return null!==this._element.closest(".navbar")}_getOffset(){const{offset:t}=this._config;return"string"==typeof t?t.split(",").map((t=>Number.parseInt(t,10))):"function"==typeof t?e=>t(e,this._element):t}_getPopperConfig(){const t={placement:this._getPlacement(),modifiers:[{name:"preventOverflow",options:{boundary:this._config.boundary}},{name:"offset",options:{offset:this._getOffset()}}]};return(this._inNavbar||"static"===this._config.display)&&(_e.setDataAttribute(this._menu,"popper","static"),t.modifiers=[{name:"applyStyles",enabled:!1}]),{...t,...Xt(this._config.popperConfig,[t])}}_selectMenuItem({key:t,target:e}){const i=we.find(".dropdown-menu .dropdown-item:not(.disabled):not(:disabled)",this._menu).filter((t=>Bt(t)));i.length&&Gt(i,e,t===xi,!i.includes(e)).focus()}static jQueryInterface(t){return this.each((function(){const e=Ki.getOrCreateInstance(this,t);if("string"==typeof t){if(void 0===e[t])throw new TypeError(`No method named "${t}"`);e[t]()}}))}static clearMenus(t){if(2===t.button||"keyup"===t.type&&"Tab"!==t.key)return;const e=we.find(ji);for(const i of e){const e=Ki.getInstance(i);if(!e||!1===e._config.autoClose)continue;const n=t.composedPath(),s=n.includes(e._menu);if(n.includes(e._element)||"inside"===e._config.autoClose&&!s||"outside"===e._config.autoClose&&s)continue;if(e._menu.contains(t.target)&&("keyup"===t.type&&"Tab"===t.key||/input|select|option|textarea|form/i.test(t.target.tagName)))continue;const 
o={relatedTarget:e._element};"click"===t.type&&(o.clickEvent=t),e._completeHide(o)}}static dataApiKeydownHandler(t){const e=/input|textarea/i.test(t.target.tagName),i="Escape"===t.key,n=[Oi,xi].includes(t.key);if(!n&&!i)return;if(e&&!i)return;t.preventDefault();const s=this.matches(Mi)?this:we.prev(this,Mi)[0]||we.next(this,Mi)[0]||we.findOne(Mi,t.delegateTarget.parentNode),o=Ki.getOrCreateInstance(s);if(n)return t.stopPropagation(),o.show(),void o._selectMenuItem(t);o._isShown()&&(t.stopPropagation(),o.hide(),s.focus())}}fe.on(document,Ii,Mi,Ki.dataApiKeydownHandler),fe.on(document,Ii,Fi,Ki.dataApiKeydownHandler),fe.on(document,$i,Ki.clearMenus),fe.on(document,Ni,Ki.clearMenus),fe.on(document,$i,Mi,(function(t){t.preventDefault(),Ki.getOrCreateInstance(this).toggle()})),Qt(Ki);const Qi="backdrop",Xi="show",Ui=`mousedown.bs.${Qi}`,Gi={className:"modal-backdrop",clickCallback:null,isAnimated:!1,isVisible:!0,rootElement:"body"},Ji={className:"string",clickCallback:"(function|null)",isAnimated:"boolean",isVisible:"boolean",rootElement:"(element|string)"};class Zi extends be{constructor(t){super(),this._config=this._getConfig(t),this._isAppended=!1,this._element=null}static get Default(){return Gi}static get DefaultType(){return Ji}static get NAME(){return Qi}show(t){if(!this._config.isVisible)return void Xt(t);this._append();const e=this._getElement();this._config.isAnimated&&qt(e),e.classList.add(Xi),this._emulateAnimation((()=>{Xt(t)}))}hide(t){this._config.isVisible?(this._getElement().classList.remove(Xi),this._emulateAnimation((()=>{this.dispose(),Xt(t)}))):Xt(t)}dispose(){this._isAppended&&(fe.off(this._element,Ui),this._element.remove(),this._isAppended=!1)}_getElement(){if(!this._element){const t=document.createElement("div");t.className=this._config.className,this._config.isAnimated&&t.classList.add("fade"),this._element=t}return this._element}_configAfterMerge(t){return t.rootElement=Ht(t.rootElement),t}_append(){if(this._isAppended)return;const t=this._getElement();this._config.rootElement.append(t),fe.on(t,Ui,(()=>{Xt(this._config.clickCallback)})),this._isAppended=!0}_emulateAnimation(t){Ut(t,this._getElement(),this._config.isAnimated)}}const tn=".bs.focustrap",en=`focusin${tn}`,nn=`keydown.tab${tn}`,sn="backward",on={autofocus:!0,trapElement:null},rn={autofocus:"boolean",trapElement:"element"};class an extends be{constructor(t){super(),this._config=this._getConfig(t),this._isActive=!1,this._lastTabNavDirection=null}static get Default(){return on}static get DefaultType(){return rn}static get NAME(){return"focustrap"}activate(){this._isActive||(this._config.autofocus&&this._config.trapElement.focus(),fe.off(document,tn),fe.on(document,en,(t=>this._handleFocusin(t))),fe.on(document,nn,(t=>this._handleKeydown(t))),this._isActive=!0)}deactivate(){this._isActive&&(this._isActive=!1,fe.off(document,tn))}_handleFocusin(t){const{trapElement:e}=this._config;if(t.target===document||t.target===e||e.contains(t.target))return;const i=we.focusableChildren(e);0===i.length?e.focus():this._lastTabNavDirection===sn?i[i.length-1].focus():i[0].focus()}_handleKeydown(t){"Tab"===t.key&&(this._lastTabNavDirection=t.shiftKey?sn:"forward")}}const ln=".fixed-top, .fixed-bottom, .is-fixed, .sticky-top",cn=".sticky-top",hn="padding-right",dn="margin-right";class un{constructor(){this._element=document.body}getWidth(){const t=document.documentElement.clientWidth;return Math.abs(window.innerWidth-t)}hide(){const 
t=this.getWidth();this._disableOverFlow(),this._setElementAttributes(this._element,hn,(e=>e+t)),this._setElementAttributes(ln,hn,(e=>e+t)),this._setElementAttributes(cn,dn,(e=>e-t))}reset(){this._resetElementAttributes(this._element,"overflow"),this._resetElementAttributes(this._element,hn),this._resetElementAttributes(ln,hn),this._resetElementAttributes(cn,dn)}isOverflowing(){return this.getWidth()>0}_disableOverFlow(){this._saveInitialAttribute(this._element,"overflow"),this._element.style.overflow="hidden"}_setElementAttributes(t,e,i){const n=this.getWidth();this._applyManipulationCallback(t,(t=>{if(t!==this._element&&window.innerWidth>t.clientWidth+n)return;this._saveInitialAttribute(t,e);const s=window.getComputedStyle(t).getPropertyValue(e);t.style.setProperty(e,`${i(Number.parseFloat(s))}px`)}))}_saveInitialAttribute(t,e){const i=t.style.getPropertyValue(e);i&&_e.setDataAttribute(t,e,i)}_resetElementAttributes(t,e){this._applyManipulationCallback(t,(t=>{const i=_e.getDataAttribute(t,e);null!==i?(_e.removeDataAttribute(t,e),t.style.setProperty(e,i)):t.style.removeProperty(e)}))}_applyManipulationCallback(t,e){if(Ft(t))e(t);else for(const i of we.find(t,this._element))e(i)}}const fn=".bs.modal",pn=`hide${fn}`,mn=`hidePrevented${fn}`,gn=`hidden${fn}`,_n=`show${fn}`,bn=`shown${fn}`,vn=`resize${fn}`,yn=`click.dismiss${fn}`,wn=`mousedown.dismiss${fn}`,En=`keydown.dismiss${fn}`,An=`click${fn}.data-api`,Tn="modal-open",Cn="show",On="modal-static",xn={backdrop:!0,focus:!0,keyboard:!0},kn={backdrop:"(boolean|string)",focus:"boolean",keyboard:"boolean"};class Ln extends ve{constructor(t,e){super(t,e),this._dialog=we.findOne(".modal-dialog",this._element),this._backdrop=this._initializeBackDrop(),this._focustrap=this._initializeFocusTrap(),this._isShown=!1,this._isTransitioning=!1,this._scrollBar=new un,this._addEventListeners()}static get Default(){return xn}static get DefaultType(){return kn}static get NAME(){return"modal"}toggle(t){return this._isShown?this.hide():this.show(t)}show(t){this._isShown||this._isTransitioning||fe.trigger(this._element,_n,{relatedTarget:t}).defaultPrevented||(this._isShown=!0,this._isTransitioning=!0,this._scrollBar.hide(),document.body.classList.add(Tn),this._adjustDialog(),this._backdrop.show((()=>this._showElement(t))))}hide(){this._isShown&&!this._isTransitioning&&(fe.trigger(this._element,pn).defaultPrevented||(this._isShown=!1,this._isTransitioning=!0,this._focustrap.deactivate(),this._element.classList.remove(Cn),this._queueCallback((()=>this._hideModal()),this._element,this._isAnimated())))}dispose(){fe.off(window,fn),fe.off(this._dialog,fn),this._backdrop.dispose(),this._focustrap.deactivate(),super.dispose()}handleUpdate(){this._adjustDialog()}_initializeBackDrop(){return new Zi({isVisible:Boolean(this._config.backdrop),isAnimated:this._isAnimated()})}_initializeFocusTrap(){return new an({trapElement:this._element})}_showElement(t){document.body.contains(this._element)||document.body.append(this._element),this._element.style.display="block",this._element.removeAttribute("aria-hidden"),this._element.setAttribute("aria-modal",!0),this._element.setAttribute("role","dialog"),this._element.scrollTop=0;const 
e=we.findOne(".modal-body",this._dialog);e&&(e.scrollTop=0),qt(this._element),this._element.classList.add(Cn),this._queueCallback((()=>{this._config.focus&&this._focustrap.activate(),this._isTransitioning=!1,fe.trigger(this._element,bn,{relatedTarget:t})}),this._dialog,this._isAnimated())}_addEventListeners(){fe.on(this._element,En,(t=>{"Escape"===t.key&&(this._config.keyboard?this.hide():this._triggerBackdropTransition())})),fe.on(window,vn,(()=>{this._isShown&&!this._isTransitioning&&this._adjustDialog()})),fe.on(this._element,wn,(t=>{fe.one(this._element,yn,(e=>{this._element===t.target&&this._element===e.target&&("static"!==this._config.backdrop?this._config.backdrop&&this.hide():this._triggerBackdropTransition())}))}))}_hideModal(){this._element.style.display="none",this._element.setAttribute("aria-hidden",!0),this._element.removeAttribute("aria-modal"),this._element.removeAttribute("role"),this._isTransitioning=!1,this._backdrop.hide((()=>{document.body.classList.remove(Tn),this._resetAdjustments(),this._scrollBar.reset(),fe.trigger(this._element,gn)}))}_isAnimated(){return this._element.classList.contains("fade")}_triggerBackdropTransition(){if(fe.trigger(this._element,mn).defaultPrevented)return;const t=this._element.scrollHeight>document.documentElement.clientHeight,e=this._element.style.overflowY;"hidden"===e||this._element.classList.contains(On)||(t||(this._element.style.overflowY="hidden"),this._element.classList.add(On),this._queueCallback((()=>{this._element.classList.remove(On),this._queueCallback((()=>{this._element.style.overflowY=e}),this._dialog)}),this._dialog),this._element.focus())}_adjustDialog(){const t=this._element.scrollHeight>document.documentElement.clientHeight,e=this._scrollBar.getWidth(),i=e>0;if(i&&!t){const t=Kt()?"paddingLeft":"paddingRight";this._element.style[t]=`${e}px`}if(!i&&t){const t=Kt()?"paddingRight":"paddingLeft";this._element.style[t]=`${e}px`}}_resetAdjustments(){this._element.style.paddingLeft="",this._element.style.paddingRight=""}static jQueryInterface(t,e){return this.each((function(){const i=Ln.getOrCreateInstance(this,t);if("string"==typeof t){if(void 0===i[t])throw new TypeError(`No method named "${t}"`);i[t](e)}}))}}fe.on(document,An,'[data-bs-toggle="modal"]',(function(t){const e=we.getElementFromSelector(this);["A","AREA"].includes(this.tagName)&&t.preventDefault(),fe.one(e,_n,(t=>{t.defaultPrevented||fe.one(e,gn,(()=>{Bt(this)&&this.focus()}))}));const i=we.findOne(".modal.show");i&&Ln.getInstance(i).hide(),Ln.getOrCreateInstance(e).toggle(this)})),Ee(Ln),Qt(Ln);const Sn=".bs.offcanvas",Dn=".data-api",$n=`load${Sn}${Dn}`,In="show",Nn="showing",Pn="hiding",Mn=".offcanvas.show",jn=`show${Sn}`,Fn=`shown${Sn}`,Hn=`hide${Sn}`,Bn=`hidePrevented${Sn}`,Wn=`hidden${Sn}`,zn=`resize${Sn}`,Rn=`click${Sn}${Dn}`,qn=`keydown.dismiss${Sn}`,Vn={backdrop:!0,keyboard:!0,scroll:!1},Yn={backdrop:"(boolean|string)",keyboard:"boolean",scroll:"boolean"};class Kn extends ve{constructor(t,e){super(t,e),this._isShown=!1,this._backdrop=this._initializeBackDrop(),this._focustrap=this._initializeFocusTrap(),this._addEventListeners()}static get Default(){return Vn}static get DefaultType(){return Yn}static get NAME(){return"offcanvas"}toggle(t){return this._isShown?this.hide():this.show(t)}show(t){this._isShown||fe.trigger(this._element,jn,{relatedTarget:t}).defaultPrevented||(this._isShown=!0,this._backdrop.show(),this._config.scroll||(new 
un).hide(),this._element.setAttribute("aria-modal",!0),this._element.setAttribute("role","dialog"),this._element.classList.add(Nn),this._queueCallback((()=>{this._config.scroll&&!this._config.backdrop||this._focustrap.activate(),this._element.classList.add(In),this._element.classList.remove(Nn),fe.trigger(this._element,Fn,{relatedTarget:t})}),this._element,!0))}hide(){this._isShown&&(fe.trigger(this._element,Hn).defaultPrevented||(this._focustrap.deactivate(),this._element.blur(),this._isShown=!1,this._element.classList.add(Pn),this._backdrop.hide(),this._queueCallback((()=>{this._element.classList.remove(In,Pn),this._element.removeAttribute("aria-modal"),this._element.removeAttribute("role"),this._config.scroll||(new un).reset(),fe.trigger(this._element,Wn)}),this._element,!0)))}dispose(){this._backdrop.dispose(),this._focustrap.deactivate(),super.dispose()}_initializeBackDrop(){const t=Boolean(this._config.backdrop);return new Zi({className:"offcanvas-backdrop",isVisible:t,isAnimated:!0,rootElement:this._element.parentNode,clickCallback:t?()=>{"static"!==this._config.backdrop?this.hide():fe.trigger(this._element,Bn)}:null})}_initializeFocusTrap(){return new an({trapElement:this._element})}_addEventListeners(){fe.on(this._element,qn,(t=>{"Escape"===t.key&&(this._config.keyboard?this.hide():fe.trigger(this._element,Bn))}))}static jQueryInterface(t){return this.each((function(){const e=Kn.getOrCreateInstance(this,t);if("string"==typeof t){if(void 0===e[t]||t.startsWith("_")||"constructor"===t)throw new TypeError(`No method named "${t}"`);e[t](this)}}))}}fe.on(document,Rn,'[data-bs-toggle="offcanvas"]',(function(t){const e=we.getElementFromSelector(this);if(["A","AREA"].includes(this.tagName)&&t.preventDefault(),Wt(this))return;fe.one(e,Wn,(()=>{Bt(this)&&this.focus()}));const i=we.findOne(Mn);i&&i!==e&&Kn.getInstance(i).hide(),Kn.getOrCreateInstance(e).toggle(this)})),fe.on(window,$n,(()=>{for(const t of we.find(Mn))Kn.getOrCreateInstance(t).show()})),fe.on(window,zn,(()=>{for(const t of we.find("[aria-modal][class*=show][class*=offcanvas-]"))"fixed"!==getComputedStyle(t).position&&Kn.getOrCreateInstance(t).hide()})),Ee(Kn),Qt(Kn);const Qn={"*":["class","dir","id","lang","role",/^aria-[\w-]*$/i],a:["target","href","title","rel"],area:[],b:[],br:[],col:[],code:[],dd:[],div:[],dl:[],dt:[],em:[],hr:[],h1:[],h2:[],h3:[],h4:[],h5:[],h6:[],i:[],img:["src","srcset","alt","title","width","height"],li:[],ol:[],p:[],pre:[],s:[],small:[],span:[],sub:[],sup:[],strong:[],u:[],ul:[]},Xn=new Set(["background","cite","href","itemtype","longdesc","poster","src","xlink:href"]),Un=/^(?!javascript:)(?:[a-z0-9+.-]+:|[^&:/?#]*(?:[/?#]|$))/i,Gn=(t,e)=>{const i=t.nodeName.toLowerCase();return e.includes(i)?!Xn.has(i)||Boolean(Un.test(t.nodeValue)):e.filter((t=>t instanceof RegExp)).some((t=>t.test(i)))},Jn={allowList:Qn,content:{},extraClass:"",html:!1,sanitize:!0,sanitizeFn:null,template:"
"},Zn={allowList:"object",content:"object",extraClass:"(string|function)",html:"boolean",sanitize:"boolean",sanitizeFn:"(null|function)",template:"string"},ts={entry:"(string|element|function|null)",selector:"(string|element)"};class es extends be{constructor(t){super(),this._config=this._getConfig(t)}static get Default(){return Jn}static get DefaultType(){return Zn}static get NAME(){return"TemplateFactory"}getContent(){return Object.values(this._config.content).map((t=>this._resolvePossibleFunction(t))).filter(Boolean)}hasContent(){return this.getContent().length>0}changeContent(t){return this._checkContent(t),this._config.content={...this._config.content,...t},this}toHtml(){const t=document.createElement("div");t.innerHTML=this._maybeSanitize(this._config.template);for(const[e,i]of Object.entries(this._config.content))this._setContent(t,i,e);const e=t.children[0],i=this._resolvePossibleFunction(this._config.extraClass);return i&&e.classList.add(...i.split(" ")),e}_typeCheckConfig(t){super._typeCheckConfig(t),this._checkContent(t.content)}_checkContent(t){for(const[e,i]of Object.entries(t))super._typeCheckConfig({selector:e,entry:i},ts)}_setContent(t,e,i){const n=we.findOne(i,t);n&&((e=this._resolvePossibleFunction(e))?Ft(e)?this._putElementInTemplate(Ht(e),n):this._config.html?n.innerHTML=this._maybeSanitize(e):n.textContent=e:n.remove())}_maybeSanitize(t){return this._config.sanitize?function(t,e,i){if(!t.length)return t;if(i&&"function"==typeof i)return i(t);const n=(new window.DOMParser).parseFromString(t,"text/html"),s=[].concat(...n.body.querySelectorAll("*"));for(const t of s){const i=t.nodeName.toLowerCase();if(!Object.keys(e).includes(i)){t.remove();continue}const n=[].concat(...t.attributes),s=[].concat(e["*"]||[],e[i]||[]);for(const e of n)Gn(e,s)||t.removeAttribute(e.nodeName)}return n.body.innerHTML}(t,this._config.allowList,this._config.sanitizeFn):t}_resolvePossibleFunction(t){return Xt(t,[this])}_putElementInTemplate(t,e){if(this._config.html)return e.innerHTML="",void e.append(t);e.textContent=t.textContent}}const is=new Set(["sanitize","allowList","sanitizeFn"]),ns="fade",ss="show",os=".tooltip-inner",rs=".modal",as="hide.bs.modal",ls="hover",cs="focus",hs={AUTO:"auto",TOP:"top",RIGHT:Kt()?"left":"right",BOTTOM:"bottom",LEFT:Kt()?"right":"left"},ds={allowList:Qn,animation:!0,boundary:"clippingParents",container:!1,customClass:"",delay:0,fallbackPlacements:["top","right","bottom","left"],html:!1,offset:[0,6],placement:"top",popperConfig:null,sanitize:!0,sanitizeFn:null,selector:!1,template:'',title:"",trigger:"hover focus"},us={allowList:"object",animation:"boolean",boundary:"(string|element)",container:"(string|element|boolean)",customClass:"(string|function)",delay:"(number|object)",fallbackPlacements:"array",html:"boolean",offset:"(array|string|function)",placement:"(string|function)",popperConfig:"(null|object|function)",sanitize:"boolean",sanitizeFn:"(null|function)",selector:"(string|boolean)",template:"string",title:"(string|element|function)",trigger:"string"};class fs extends ve{constructor(t,i){if(void 0===e)throw new TypeError("Bootstrap's tooltips require Popper (https://popper.js.org)");super(t,i),this._isEnabled=!0,this._timeout=0,this._isHovered=null,this._activeTrigger={},this._popper=null,this._templateFactory=null,this._newContent=null,this.tip=null,this._setListeners(),this._config.selector||this._fixTitle()}static get Default(){return ds}static get DefaultType(){return us}static get 
NAME(){return"tooltip"}enable(){this._isEnabled=!0}disable(){this._isEnabled=!1}toggleEnabled(){this._isEnabled=!this._isEnabled}toggle(){this._isEnabled&&(this._activeTrigger.click=!this._activeTrigger.click,this._isShown()?this._leave():this._enter())}dispose(){clearTimeout(this._timeout),fe.off(this._element.closest(rs),as,this._hideModalHandler),this._element.getAttribute("data-bs-original-title")&&this._element.setAttribute("title",this._element.getAttribute("data-bs-original-title")),this._disposePopper(),super.dispose()}show(){if("none"===this._element.style.display)throw new Error("Please use show on visible elements");if(!this._isWithContent()||!this._isEnabled)return;const t=fe.trigger(this._element,this.constructor.eventName("show")),e=(zt(this._element)||this._element.ownerDocument.documentElement).contains(this._element);if(t.defaultPrevented||!e)return;this._disposePopper();const i=this._getTipElement();this._element.setAttribute("aria-describedby",i.getAttribute("id"));const{container:n}=this._config;if(this._element.ownerDocument.documentElement.contains(this.tip)||(n.append(i),fe.trigger(this._element,this.constructor.eventName("inserted"))),this._popper=this._createPopper(i),i.classList.add(ss),"ontouchstart"in document.documentElement)for(const t of[].concat(...document.body.children))fe.on(t,"mouseover",Rt);this._queueCallback((()=>{fe.trigger(this._element,this.constructor.eventName("shown")),!1===this._isHovered&&this._leave(),this._isHovered=!1}),this.tip,this._isAnimated())}hide(){if(this._isShown()&&!fe.trigger(this._element,this.constructor.eventName("hide")).defaultPrevented){if(this._getTipElement().classList.remove(ss),"ontouchstart"in document.documentElement)for(const t of[].concat(...document.body.children))fe.off(t,"mouseover",Rt);this._activeTrigger.click=!1,this._activeTrigger[cs]=!1,this._activeTrigger[ls]=!1,this._isHovered=null,this._queueCallback((()=>{this._isWithActiveTrigger()||(this._isHovered||this._disposePopper(),this._element.removeAttribute("aria-describedby"),fe.trigger(this._element,this.constructor.eventName("hidden")))}),this.tip,this._isAnimated())}}update(){this._popper&&this._popper.update()}_isWithContent(){return Boolean(this._getTitle())}_getTipElement(){return this.tip||(this.tip=this._createTipElement(this._newContent||this._getContentForTemplate())),this.tip}_createTipElement(t){const e=this._getTemplateFactory(t).toHtml();if(!e)return null;e.classList.remove(ns,ss),e.classList.add(`bs-${this.constructor.NAME}-auto`);const i=(t=>{do{t+=Math.floor(1e6*Math.random())}while(document.getElementById(t));return t})(this.constructor.NAME).toString();return e.setAttribute("id",i),this._isAnimated()&&e.classList.add(ns),e}setContent(t){this._newContent=t,this._isShown()&&(this._disposePopper(),this.show())}_getTemplateFactory(t){return this._templateFactory?this._templateFactory.changeContent(t):this._templateFactory=new es({...this._config,content:t,extraClass:this._resolvePossibleFunction(this._config.customClass)}),this._templateFactory}_getContentForTemplate(){return{[os]:this._getTitle()}}_getTitle(){return this._resolvePossibleFunction(this._config.title)||this._element.getAttribute("data-bs-original-title")}_initializeOnDelegatedTarget(t){return this.constructor.getOrCreateInstance(t.delegateTarget,this._getDelegateConfig())}_isAnimated(){return this._config.animation||this.tip&&this.tip.classList.contains(ns)}_isShown(){return this.tip&&this.tip.classList.contains(ss)}_createPopper(t){const 
e=Xt(this._config.placement,[this,t,this._element]),i=hs[e.toUpperCase()];return Dt(this._element,t,this._getPopperConfig(i))}_getOffset(){const{offset:t}=this._config;return"string"==typeof t?t.split(",").map((t=>Number.parseInt(t,10))):"function"==typeof t?e=>t(e,this._element):t}_resolvePossibleFunction(t){return Xt(t,[this._element])}_getPopperConfig(t){const e={placement:t,modifiers:[{name:"flip",options:{fallbackPlacements:this._config.fallbackPlacements}},{name:"offset",options:{offset:this._getOffset()}},{name:"preventOverflow",options:{boundary:this._config.boundary}},{name:"arrow",options:{element:`.${this.constructor.NAME}-arrow`}},{name:"preSetPlacement",enabled:!0,phase:"beforeMain",fn:t=>{this._getTipElement().setAttribute("data-popper-placement",t.state.placement)}}]};return{...e,...Xt(this._config.popperConfig,[e])}}_setListeners(){const t=this._config.trigger.split(" ");for(const e of t)if("click"===e)fe.on(this._element,this.constructor.eventName("click"),this._config.selector,(t=>{this._initializeOnDelegatedTarget(t).toggle()}));else if("manual"!==e){const t=e===ls?this.constructor.eventName("mouseenter"):this.constructor.eventName("focusin"),i=e===ls?this.constructor.eventName("mouseleave"):this.constructor.eventName("focusout");fe.on(this._element,t,this._config.selector,(t=>{const e=this._initializeOnDelegatedTarget(t);e._activeTrigger["focusin"===t.type?cs:ls]=!0,e._enter()})),fe.on(this._element,i,this._config.selector,(t=>{const e=this._initializeOnDelegatedTarget(t);e._activeTrigger["focusout"===t.type?cs:ls]=e._element.contains(t.relatedTarget),e._leave()}))}this._hideModalHandler=()=>{this._element&&this.hide()},fe.on(this._element.closest(rs),as,this._hideModalHandler)}_fixTitle(){const t=this._element.getAttribute("title");t&&(this._element.getAttribute("aria-label")||this._element.textContent.trim()||this._element.setAttribute("aria-label",t),this._element.setAttribute("data-bs-original-title",t),this._element.removeAttribute("title"))}_enter(){this._isShown()||this._isHovered?this._isHovered=!0:(this._isHovered=!0,this._setTimeout((()=>{this._isHovered&&this.show()}),this._config.delay.show))}_leave(){this._isWithActiveTrigger()||(this._isHovered=!1,this._setTimeout((()=>{this._isHovered||this.hide()}),this._config.delay.hide))}_setTimeout(t,e){clearTimeout(this._timeout),this._timeout=setTimeout(t,e)}_isWithActiveTrigger(){return Object.values(this._activeTrigger).includes(!0)}_getConfig(t){const e=_e.getDataAttributes(this._element);for(const t of Object.keys(e))is.has(t)&&delete e[t];return t={...e,..."object"==typeof t&&t?t:{}},t=this._mergeConfigObj(t),t=this._configAfterMerge(t),this._typeCheckConfig(t),t}_configAfterMerge(t){return t.container=!1===t.container?document.body:Ht(t.container),"number"==typeof t.delay&&(t.delay={show:t.delay,hide:t.delay}),"number"==typeof t.title&&(t.title=t.title.toString()),"number"==typeof t.content&&(t.content=t.content.toString()),t}_getDelegateConfig(){const t={};for(const[e,i]of Object.entries(this._config))this.constructor.Default[e]!==i&&(t[e]=i);return t.selector=!1,t.trigger="manual",t}_disposePopper(){this._popper&&(this._popper.destroy(),this._popper=null),this.tip&&(this.tip.remove(),this.tip=null)}static jQueryInterface(t){return this.each((function(){const e=fs.getOrCreateInstance(this,t);if("string"==typeof t){if(void 0===e[t])throw new TypeError(`No method named "${t}"`);e[t]()}}))}}Qt(fs);const 
ps=".popover-header",ms=".popover-body",gs={...fs.Default,content:"",offset:[0,8],placement:"right",template:'',trigger:"click"},_s={...fs.DefaultType,content:"(null|string|element|function)"};class bs extends fs{static get Default(){return gs}static get DefaultType(){return _s}static get NAME(){return"popover"}_isWithContent(){return this._getTitle()||this._getContent()}_getContentForTemplate(){return{[ps]:this._getTitle(),[ms]:this._getContent()}}_getContent(){return this._resolvePossibleFunction(this._config.content)}static jQueryInterface(t){return this.each((function(){const e=bs.getOrCreateInstance(this,t);if("string"==typeof t){if(void 0===e[t])throw new TypeError(`No method named "${t}"`);e[t]()}}))}}Qt(bs);const vs=".bs.scrollspy",ys=`activate${vs}`,ws=`click${vs}`,Es=`load${vs}.data-api`,As="active",Ts="[href]",Cs=".nav-link",Os=`${Cs}, .nav-item > ${Cs}, .list-group-item`,xs={offset:null,rootMargin:"0px 0px -25%",smoothScroll:!1,target:null,threshold:[.1,.5,1]},ks={offset:"(number|null)",rootMargin:"string",smoothScroll:"boolean",target:"element",threshold:"array"};class Ls extends ve{constructor(t,e){super(t,e),this._targetLinks=new Map,this._observableSections=new Map,this._rootElement="visible"===getComputedStyle(this._element).overflowY?null:this._element,this._activeTarget=null,this._observer=null,this._previousScrollData={visibleEntryTop:0,parentScrollTop:0},this.refresh()}static get Default(){return xs}static get DefaultType(){return ks}static get NAME(){return"scrollspy"}refresh(){this._initializeTargetsAndObservables(),this._maybeEnableSmoothScroll(),this._observer?this._observer.disconnect():this._observer=this._getNewObserver();for(const t of this._observableSections.values())this._observer.observe(t)}dispose(){this._observer.disconnect(),super.dispose()}_configAfterMerge(t){return t.target=Ht(t.target)||document.body,t.rootMargin=t.offset?`${t.offset}px 0px -30%`:t.rootMargin,"string"==typeof t.threshold&&(t.threshold=t.threshold.split(",").map((t=>Number.parseFloat(t)))),t}_maybeEnableSmoothScroll(){this._config.smoothScroll&&(fe.off(this._config.target,ws),fe.on(this._config.target,ws,Ts,(t=>{const e=this._observableSections.get(t.target.hash);if(e){t.preventDefault();const i=this._rootElement||window,n=e.offsetTop-this._element.offsetTop;if(i.scrollTo)return void i.scrollTo({top:n,behavior:"smooth"});i.scrollTop=n}})))}_getNewObserver(){const t={root:this._rootElement,threshold:this._config.threshold,rootMargin:this._config.rootMargin};return new IntersectionObserver((t=>this._observerCallback(t)),t)}_observerCallback(t){const e=t=>this._targetLinks.get(`#${t.target.id}`),i=t=>{this._previousScrollData.visibleEntryTop=t.target.offsetTop,this._process(e(t))},n=(this._rootElement||document.documentElement).scrollTop,s=n>=this._previousScrollData.parentScrollTop;this._previousScrollData.parentScrollTop=n;for(const o of t){if(!o.isIntersecting){this._activeTarget=null,this._clearActiveClass(e(o));continue}const t=o.target.offsetTop>=this._previousScrollData.visibleEntryTop;if(s&&t){if(i(o),!n)return}else s||t||i(o)}}_initializeTargetsAndObservables(){this._targetLinks=new Map,this._observableSections=new Map;const t=we.find(Ts,this._config.target);for(const e of t){if(!e.hash||Wt(e))continue;const 
t=we.findOne(decodeURI(e.hash),this._element);Bt(t)&&(this._targetLinks.set(decodeURI(e.hash),e),this._observableSections.set(e.hash,t))}}_process(t){this._activeTarget!==t&&(this._clearActiveClass(this._config.target),this._activeTarget=t,t.classList.add(As),this._activateParents(t),fe.trigger(this._element,ys,{relatedTarget:t}))}_activateParents(t){if(t.classList.contains("dropdown-item"))we.findOne(".dropdown-toggle",t.closest(".dropdown")).classList.add(As);else for(const e of we.parents(t,".nav, .list-group"))for(const t of we.prev(e,Os))t.classList.add(As)}_clearActiveClass(t){t.classList.remove(As);const e=we.find(`${Ts}.${As}`,t);for(const t of e)t.classList.remove(As)}static jQueryInterface(t){return this.each((function(){const e=Ls.getOrCreateInstance(this,t);if("string"==typeof t){if(void 0===e[t]||t.startsWith("_")||"constructor"===t)throw new TypeError(`No method named "${t}"`);e[t]()}}))}}fe.on(window,Es,(()=>{for(const t of we.find('[data-bs-spy="scroll"]'))Ls.getOrCreateInstance(t)})),Qt(Ls);const Ss=".bs.tab",Ds=`hide${Ss}`,$s=`hidden${Ss}`,Is=`show${Ss}`,Ns=`shown${Ss}`,Ps=`click${Ss}`,Ms=`keydown${Ss}`,js=`load${Ss}`,Fs="ArrowLeft",Hs="ArrowRight",Bs="ArrowUp",Ws="ArrowDown",zs="Home",Rs="End",qs="active",Vs="fade",Ys="show",Ks=".dropdown-toggle",Qs=`:not(${Ks})`,Xs='[data-bs-toggle="tab"], [data-bs-toggle="pill"], [data-bs-toggle="list"]',Us=`.nav-link${Qs}, .list-group-item${Qs}, [role="tab"]${Qs}, ${Xs}`,Gs=`.${qs}[data-bs-toggle="tab"], .${qs}[data-bs-toggle="pill"], .${qs}[data-bs-toggle="list"]`;class Js extends ve{constructor(t){super(t),this._parent=this._element.closest('.list-group, .nav, [role="tablist"]'),this._parent&&(this._setInitialAttributes(this._parent,this._getChildren()),fe.on(this._element,Ms,(t=>this._keydown(t))))}static get NAME(){return"tab"}show(){const t=this._element;if(this._elemIsActive(t))return;const e=this._getActiveElem(),i=e?fe.trigger(e,Ds,{relatedTarget:t}):null;fe.trigger(t,Is,{relatedTarget:e}).defaultPrevented||i&&i.defaultPrevented||(this._deactivate(e,t),this._activate(t,e))}_activate(t,e){t&&(t.classList.add(qs),this._activate(we.getElementFromSelector(t)),this._queueCallback((()=>{"tab"===t.getAttribute("role")?(t.removeAttribute("tabindex"),t.setAttribute("aria-selected",!0),this._toggleDropDown(t,!0),fe.trigger(t,Ns,{relatedTarget:e})):t.classList.add(Ys)}),t,t.classList.contains(Vs)))}_deactivate(t,e){t&&(t.classList.remove(qs),t.blur(),this._deactivate(we.getElementFromSelector(t)),this._queueCallback((()=>{"tab"===t.getAttribute("role")?(t.setAttribute("aria-selected",!1),t.setAttribute("tabindex","-1"),this._toggleDropDown(t,!1),fe.trigger(t,$s,{relatedTarget:e})):t.classList.remove(Ys)}),t,t.classList.contains(Vs)))}_keydown(t){if(![Fs,Hs,Bs,Ws,zs,Rs].includes(t.key))return;t.stopPropagation(),t.preventDefault();const e=this._getChildren().filter((t=>!Wt(t)));let i;if([zs,Rs].includes(t.key))i=e[t.key===zs?0:e.length-1];else{const n=[Hs,Ws].includes(t.key);i=Gt(e,t.target,n,!0)}i&&(i.focus({preventScroll:!0}),Js.getOrCreateInstance(i).show())}_getChildren(){return we.find(Us,this._parent)}_getActiveElem(){return this._getChildren().find((t=>this._elemIsActive(t)))||null}_setInitialAttributes(t,e){this._setAttributeIfNotExists(t,"role","tablist");for(const t of e)this._setInitialAttributesOnChild(t)}_setInitialAttributesOnChild(t){t=this._getInnerElement(t);const 
e=this._elemIsActive(t),i=this._getOuterElement(t);t.setAttribute("aria-selected",e),i!==t&&this._setAttributeIfNotExists(i,"role","presentation"),e||t.setAttribute("tabindex","-1"),this._setAttributeIfNotExists(t,"role","tab"),this._setInitialAttributesOnTargetPanel(t)}_setInitialAttributesOnTargetPanel(t){const e=we.getElementFromSelector(t);e&&(this._setAttributeIfNotExists(e,"role","tabpanel"),t.id&&this._setAttributeIfNotExists(e,"aria-labelledby",`${t.id}`))}_toggleDropDown(t,e){const i=this._getOuterElement(t);if(!i.classList.contains("dropdown"))return;const n=(t,n)=>{const s=we.findOne(t,i);s&&s.classList.toggle(n,e)};n(Ks,qs),n(".dropdown-menu",Ys),i.setAttribute("aria-expanded",e)}_setAttributeIfNotExists(t,e,i){t.hasAttribute(e)||t.setAttribute(e,i)}_elemIsActive(t){return t.classList.contains(qs)}_getInnerElement(t){return t.matches(Us)?t:we.findOne(Us,t)}_getOuterElement(t){return t.closest(".nav-item, .list-group-item")||t}static jQueryInterface(t){return this.each((function(){const e=Js.getOrCreateInstance(this);if("string"==typeof t){if(void 0===e[t]||t.startsWith("_")||"constructor"===t)throw new TypeError(`No method named "${t}"`);e[t]()}}))}}fe.on(document,Ps,Xs,(function(t){["A","AREA"].includes(this.tagName)&&t.preventDefault(),Wt(this)||Js.getOrCreateInstance(this).show()})),fe.on(window,js,(()=>{for(const t of we.find(Gs))Js.getOrCreateInstance(t)})),Qt(Js);const Zs=".bs.toast",to=`mouseover${Zs}`,eo=`mouseout${Zs}`,io=`focusin${Zs}`,no=`focusout${Zs}`,so=`hide${Zs}`,oo=`hidden${Zs}`,ro=`show${Zs}`,ao=`shown${Zs}`,lo="hide",co="show",ho="showing",uo={animation:"boolean",autohide:"boolean",delay:"number"},fo={animation:!0,autohide:!0,delay:5e3};class po extends ve{constructor(t,e){super(t,e),this._timeout=null,this._hasMouseInteraction=!1,this._hasKeyboardInteraction=!1,this._setListeners()}static get Default(){return fo}static get DefaultType(){return uo}static get NAME(){return"toast"}show(){fe.trigger(this._element,ro).defaultPrevented||(this._clearTimeout(),this._config.animation&&this._element.classList.add("fade"),this._element.classList.remove(lo),qt(this._element),this._element.classList.add(co,ho),this._queueCallback((()=>{this._element.classList.remove(ho),fe.trigger(this._element,ao),this._maybeScheduleHide()}),this._element,this._config.animation))}hide(){this.isShown()&&(fe.trigger(this._element,so).defaultPrevented||(this._element.classList.add(ho),this._queueCallback((()=>{this._element.classList.add(lo),this._element.classList.remove(ho,co),fe.trigger(this._element,oo)}),this._element,this._config.animation)))}dispose(){this._clearTimeout(),this.isShown()&&this._element.classList.remove(co),super.dispose()}isShown(){return this._element.classList.contains(co)}_maybeScheduleHide(){this._config.autohide&&(this._hasMouseInteraction||this._hasKeyboardInteraction||(this._timeout=setTimeout((()=>{this.hide()}),this._config.delay)))}_onInteraction(t,e){switch(t.type){case"mouseover":case"mouseout":this._hasMouseInteraction=e;break;case"focusin":case"focusout":this._hasKeyboardInteraction=e}if(e)return void this._clearTimeout();const i=t.relatedTarget;this._element===i||this._element.contains(i)||this._maybeScheduleHide()}_setListeners(){fe.on(this._element,to,(t=>this._onInteraction(t,!0))),fe.on(this._element,eo,(t=>this._onInteraction(t,!1))),fe.on(this._element,io,(t=>this._onInteraction(t,!0))),fe.on(this._element,no,(t=>this._onInteraction(t,!1)))}_clearTimeout(){clearTimeout(this._timeout),this._timeout=null}static jQueryInterface(t){return 
this.each((function(){const e=po.getOrCreateInstance(this,t);if("string"==typeof t){if(void 0===e[t])throw new TypeError(`No method named "${t}"`);e[t](this)}}))}}function mo(t){"loading"!=document.readyState?t():document.addEventListener("DOMContentLoaded",t)}Ee(po),Qt(po),mo((function(){[].slice.call(document.querySelectorAll('[data-bs-toggle="tooltip"]')).map((function(t){return new fs(t,{delay:{show:500,hide:100}})}))})),mo((function(){document.getElementById("pst-back-to-top").addEventListener("click",(function(){document.body.scrollTop=0,document.documentElement.scrollTop=0}))})),mo((function(){var t=document.getElementById("pst-back-to-top"),e=document.getElementsByClassName("bd-header")[0].getBoundingClientRect();window.addEventListener("scroll",(function(){this.oldScroll>this.scrollY&&this.scrollY>e.bottom?t.style.display="block":t.style.display="none",this.oldScroll=this.scrollY}))})),window.bootstrap=i})(); +//# sourceMappingURL=bootstrap.js.map \ No newline at end of file diff --git a/dev/docs/html/_static/scripts/bootstrap.js.LICENSE.txt b/dev/docs/html/_static/scripts/bootstrap.js.LICENSE.txt new file mode 100644 index 0000000..28755c2 --- /dev/null +++ b/dev/docs/html/_static/scripts/bootstrap.js.LICENSE.txt @@ -0,0 +1,5 @@ +/*! + * Bootstrap v5.3.3 (https://getbootstrap.com/) + * Copyright 2011-2024 The Bootstrap Authors (https://github.com/twbs/bootstrap/graphs/contributors) + * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE) + */ diff --git a/dev/docs/html/_static/scripts/bootstrap.js.map b/dev/docs/html/_static/scripts/bootstrap.js.map new file mode 100644 index 0000000..e9e8158 --- /dev/null +++ b/dev/docs/html/_static/scripts/bootstrap.js.map @@ -0,0 +1 @@ +{"version":3,"file":"scripts/bootstrap.js","mappings":";mBACA,IAAIA,EAAsB,CCA1BA,EAAwB,CAACC,EAASC,KACjC,IAAI,IAAIC,KAAOD,EACXF,EAAoBI,EAAEF,EAAYC,KAASH,EAAoBI,EAAEH,EAASE,IAC5EE,OAAOC,eAAeL,EAASE,EAAK,CAAEI,YAAY,EAAMC,IAAKN,EAAWC,IAE1E,ECNDH,EAAwB,CAACS,EAAKC,IAAUL,OAAOM,UAAUC,eAAeC,KAAKJ,EAAKC,GCClFV,EAAyBC,IACH,oBAAXa,QAA0BA,OAAOC,aAC1CV,OAAOC,eAAeL,EAASa,OAAOC,YAAa,CAAEC,MAAO,WAE7DX,OAAOC,eAAeL,EAAS,aAAc,CAAEe,OAAO,GAAO,01BCLvD,IAAI,EAAM,MACNC,EAAS,SACTC,EAAQ,QACRC,EAAO,OACPC,EAAO,OACPC,EAAiB,CAAC,EAAKJ,EAAQC,EAAOC,GACtCG,EAAQ,QACRC,EAAM,MACNC,EAAkB,kBAClBC,EAAW,WACXC,EAAS,SACTC,EAAY,YACZC,EAAmCP,EAAeQ,QAAO,SAAUC,EAAKC,GACjF,OAAOD,EAAIE,OAAO,CAACD,EAAY,IAAMT,EAAOS,EAAY,IAAMR,GAChE,GAAG,IACQ,EAA0B,GAAGS,OAAOX,EAAgB,CAACD,IAAOS,QAAO,SAAUC,EAAKC,GAC3F,OAAOD,EAAIE,OAAO,CAACD,EAAWA,EAAY,IAAMT,EAAOS,EAAY,IAAMR,GAC3E,GAAG,IAEQU,EAAa,aACbC,EAAO,OACPC,EAAY,YAEZC,EAAa,aACbC,EAAO,OACPC,EAAY,YAEZC,EAAc,cACdC,EAAQ,QACRC,EAAa,aACbC,EAAiB,CAACT,EAAYC,EAAMC,EAAWC,EAAYC,EAAMC,EAAWC,EAAaC,EAAOC,GC9B5F,SAASE,EAAYC,GAClC,OAAOA,GAAWA,EAAQC,UAAY,IAAIC,cAAgB,IAC5D,CCFe,SAASC,EAAUC,GAChC,GAAY,MAARA,EACF,OAAOC,OAGT,GAAwB,oBAApBD,EAAKE,WAAkC,CACzC,IAAIC,EAAgBH,EAAKG,cACzB,OAAOA,GAAgBA,EAAcC,aAAwBH,MAC/D,CAEA,OAAOD,CACT,CCTA,SAASK,EAAUL,GAEjB,OAAOA,aADUD,EAAUC,GAAMM,SACIN,aAAgBM,OACvD,CAEA,SAASC,EAAcP,GAErB,OAAOA,aADUD,EAAUC,GAAMQ,aACIR,aAAgBQ,WACvD,CAEA,SAASC,EAAaT,GAEpB,MAA0B,oBAAfU,aAKJV,aADUD,EAAUC,GAAMU,YACIV,aAAgBU,WACvD,CCwDA,SACEC,KAAM,cACNC,SAAS,EACTC,MAAO,QACPC,GA5EF,SAAqBC,GACnB,IAAIC,EAAQD,EAAKC,MACjB3D,OAAO4D,KAAKD,EAAME,UAAUC,SAAQ,SAAUR,GAC5C,IAAIS,EAAQJ,EAAMK,OAAOV,IAAS,CAAC,EAC/BW,EAAaN,EAAMM,WAAWX,IAAS,CAAC,EACxCf,EAAUoB,EAAME,SAASP,GAExBJ,EAAcX,IAAaD,EAAYC,KAO5CvC,OAAOkE,OAAO3B,EAAQwB,MAAOA,GAC7B/D,OAAO4D,KAAKK,GAAYH,SAAQ,SAAUR,GACxC,IAAI3C,EAAQsD,EAAWX,IAET,IAAV3C,EACF4B,EAAQ4B,gBAAgBb,GAExBf,EAA
Q6B,aAAad,GAAgB,IAAV3C,EAAiB,GAAKA,EAErD,IACF,GACF,EAoDE0D,OAlDF,SAAgBC,GACd,IAAIX,EAAQW,EAAMX,MACdY,EAAgB,CAClBlD,OAAQ,CACNmD,SAAUb,EAAMc,QAAQC,SACxB5D,KAAM,IACN6D,IAAK,IACLC,OAAQ,KAEVC,MAAO,CACLL,SAAU,YAEZlD,UAAW,CAAC,GASd,OAPAtB,OAAOkE,OAAOP,EAAME,SAASxC,OAAO0C,MAAOQ,EAAclD,QACzDsC,EAAMK,OAASO,EAEXZ,EAAME,SAASgB,OACjB7E,OAAOkE,OAAOP,EAAME,SAASgB,MAAMd,MAAOQ,EAAcM,OAGnD,WACL7E,OAAO4D,KAAKD,EAAME,UAAUC,SAAQ,SAAUR,GAC5C,IAAIf,EAAUoB,EAAME,SAASP,GACzBW,EAAaN,EAAMM,WAAWX,IAAS,CAAC,EAGxCS,EAFkB/D,OAAO4D,KAAKD,EAAMK,OAAOzD,eAAe+C,GAAQK,EAAMK,OAAOV,GAAQiB,EAAcjB,IAE7E9B,QAAO,SAAUuC,EAAOe,GAElD,OADAf,EAAMe,GAAY,GACXf,CACT,GAAG,CAAC,GAECb,EAAcX,IAAaD,EAAYC,KAI5CvC,OAAOkE,OAAO3B,EAAQwB,MAAOA,GAC7B/D,OAAO4D,KAAKK,GAAYH,SAAQ,SAAUiB,GACxCxC,EAAQ4B,gBAAgBY,EAC1B,IACF,GACF,CACF,EASEC,SAAU,CAAC,kBCjFE,SAASC,EAAiBvD,GACvC,OAAOA,EAAUwD,MAAM,KAAK,EAC9B,CCHO,IAAI,EAAMC,KAAKC,IACX,EAAMD,KAAKE,IACXC,EAAQH,KAAKG,MCFT,SAASC,IACtB,IAAIC,EAASC,UAAUC,cAEvB,OAAc,MAAVF,GAAkBA,EAAOG,QAAUC,MAAMC,QAAQL,EAAOG,QACnDH,EAAOG,OAAOG,KAAI,SAAUC,GACjC,OAAOA,EAAKC,MAAQ,IAAMD,EAAKE,OACjC,IAAGC,KAAK,KAGHT,UAAUU,SACnB,CCTe,SAASC,IACtB,OAAQ,iCAAiCC,KAAKd,IAChD,CCCe,SAASe,EAAsB/D,EAASgE,EAAcC,QAC9C,IAAjBD,IACFA,GAAe,QAGO,IAApBC,IACFA,GAAkB,GAGpB,IAAIC,EAAalE,EAAQ+D,wBACrBI,EAAS,EACTC,EAAS,EAETJ,GAAgBrD,EAAcX,KAChCmE,EAASnE,EAAQqE,YAAc,GAAItB,EAAMmB,EAAWI,OAAStE,EAAQqE,aAAmB,EACxFD,EAASpE,EAAQuE,aAAe,GAAIxB,EAAMmB,EAAWM,QAAUxE,EAAQuE,cAAoB,GAG7F,IACIE,GADOhE,EAAUT,GAAWG,EAAUH,GAAWK,QAC3BoE,eAEtBC,GAAoBb,KAAsBI,EAC1CU,GAAKT,EAAW3F,MAAQmG,GAAoBD,EAAiBA,EAAeG,WAAa,IAAMT,EAC/FU,GAAKX,EAAW9B,KAAOsC,GAAoBD,EAAiBA,EAAeK,UAAY,IAAMV,EAC7FE,EAAQJ,EAAWI,MAAQH,EAC3BK,EAASN,EAAWM,OAASJ,EACjC,MAAO,CACLE,MAAOA,EACPE,OAAQA,EACRpC,IAAKyC,EACLvG,MAAOqG,EAAIL,EACXjG,OAAQwG,EAAIL,EACZjG,KAAMoG,EACNA,EAAGA,EACHE,EAAGA,EAEP,CCrCe,SAASE,EAAc/E,GACpC,IAAIkE,EAAaH,EAAsB/D,GAGnCsE,EAAQtE,EAAQqE,YAChBG,EAASxE,EAAQuE,aAUrB,OARI3B,KAAKoC,IAAId,EAAWI,MAAQA,IAAU,IACxCA,EAAQJ,EAAWI,OAGjB1B,KAAKoC,IAAId,EAAWM,OAASA,IAAW,IAC1CA,EAASN,EAAWM,QAGf,CACLG,EAAG3E,EAAQ4E,WACXC,EAAG7E,EAAQ8E,UACXR,MAAOA,EACPE,OAAQA,EAEZ,CCvBe,SAASS,EAASC,EAAQC,GACvC,IAAIC,EAAWD,EAAME,aAAeF,EAAME,cAE1C,GAAIH,EAAOD,SAASE,GAClB,OAAO,EAEJ,GAAIC,GAAYvE,EAAauE,GAAW,CACzC,IAAIE,EAAOH,EAEX,EAAG,CACD,GAAIG,GAAQJ,EAAOK,WAAWD,GAC5B,OAAO,EAITA,EAAOA,EAAKE,YAAcF,EAAKG,IACjC,OAASH,EACX,CAGF,OAAO,CACT,CCrBe,SAAS,EAAiBtF,GACvC,OAAOG,EAAUH,GAAS0F,iBAAiB1F,EAC7C,CCFe,SAAS2F,EAAe3F,GACrC,MAAO,CAAC,QAAS,KAAM,MAAM4F,QAAQ7F,EAAYC,KAAa,CAChE,CCFe,SAAS6F,EAAmB7F,GAEzC,QAASS,EAAUT,GAAWA,EAAQO,cACtCP,EAAQ8F,WAAazF,OAAOyF,UAAUC,eACxC,CCFe,SAASC,EAAchG,GACpC,MAA6B,SAAzBD,EAAYC,GACPA,EAMPA,EAAQiG,cACRjG,EAAQwF,aACR3E,EAAab,GAAWA,EAAQyF,KAAO,OAEvCI,EAAmB7F,EAGvB,CCVA,SAASkG,EAAoBlG,GAC3B,OAAKW,EAAcX,IACoB,UAAvC,EAAiBA,GAASiC,SAInBjC,EAAQmG,aAHN,IAIX,CAwCe,SAASC,EAAgBpG,GAItC,IAHA,IAAIK,EAASF,EAAUH,GACnBmG,EAAeD,EAAoBlG,GAEhCmG,GAAgBR,EAAeQ,IAA6D,WAA5C,EAAiBA,GAAclE,UACpFkE,EAAeD,EAAoBC,GAGrC,OAAIA,IAA+C,SAA9BpG,EAAYoG,IAA0D,SAA9BpG,EAAYoG,IAAwE,WAA5C,EAAiBA,GAAclE,UAC3H5B,EAGF8F,GAhDT,SAA4BnG,GAC1B,IAAIqG,EAAY,WAAWvC,KAAKd,KAGhC,GAFW,WAAWc,KAAKd,MAEfrC,EAAcX,IAII,UAFX,EAAiBA,GAEnBiC,SACb,OAAO,KAIX,IAAIqE,EAAcN,EAAchG,GAMhC,IAJIa,EAAayF,KACfA,EAAcA,EAAYb,MAGrB9E,EAAc2F,IAAgB,CAAC,OAAQ,QAAQV,QAAQ7F,EAAYuG,IAAgB,GAAG,CAC3F,IAAIC,EAAM,EAAiBD,GAI3B,GAAsB,SAAlBC,EAAIC,WAA4C,SAApBD,EAAIE,aAA0C,UAAhBF,EAAIG,UAAiF,IAA1D,CAAC,YAAa,eAAed,QAAQW,EAAII,aAAsBN,GAAgC,WAAnBE,EAAII,YAA2BN,GAAaE,EAAIK,QAAyB,SAAfL,EAAIK,OACjO,OAAON,EAEPA,EAAcA,EAAYd,UAE9B,CAEA,OAAO,IACT,CAgByBqB,CAAmB7G,IAAYK,CACxD,CCpEe,SAASyG,EAAyB3H,GAC/C,MAAO,CAAC,MAAO,U
AAUyG,QAAQzG,IAAc,EAAI,IAAM,GAC3D,CCDO,SAAS4H,EAAOjE,EAAK1E,EAAOyE,GACjC,OAAO,EAAQC,EAAK,EAAQ1E,EAAOyE,GACrC,CCFe,SAASmE,EAAmBC,GACzC,OAAOxJ,OAAOkE,OAAO,CAAC,ECDf,CACLS,IAAK,EACL9D,MAAO,EACPD,OAAQ,EACRE,KAAM,GDHuC0I,EACjD,CEHe,SAASC,EAAgB9I,EAAOiD,GAC7C,OAAOA,EAAKpC,QAAO,SAAUkI,EAAS5J,GAEpC,OADA4J,EAAQ5J,GAAOa,EACR+I,CACT,GAAG,CAAC,EACN,CC4EA,SACEpG,KAAM,QACNC,SAAS,EACTC,MAAO,OACPC,GApEF,SAAeC,GACb,IAAIiG,EAEAhG,EAAQD,EAAKC,MACbL,EAAOI,EAAKJ,KACZmB,EAAUf,EAAKe,QACfmF,EAAejG,EAAME,SAASgB,MAC9BgF,EAAgBlG,EAAMmG,cAAcD,cACpCE,EAAgB9E,EAAiBtB,EAAMjC,WACvCsI,EAAOX,EAAyBU,GAEhCE,EADa,CAACnJ,EAAMD,GAAOsH,QAAQ4B,IAAkB,EAClC,SAAW,QAElC,GAAKH,GAAiBC,EAAtB,CAIA,IAAIL,EAxBgB,SAAyBU,EAASvG,GAItD,OAAO4F,EAAsC,iBAH7CW,EAA6B,mBAAZA,EAAyBA,EAAQlK,OAAOkE,OAAO,CAAC,EAAGP,EAAMwG,MAAO,CAC/EzI,UAAWiC,EAAMjC,aACbwI,GACkDA,EAAUT,EAAgBS,EAASlJ,GAC7F,CAmBsBoJ,CAAgB3F,EAAQyF,QAASvG,GACjD0G,EAAY/C,EAAcsC,GAC1BU,EAAmB,MAATN,EAAe,EAAMlJ,EAC/ByJ,EAAmB,MAATP,EAAepJ,EAASC,EAClC2J,EAAU7G,EAAMwG,MAAM7I,UAAU2I,GAAOtG,EAAMwG,MAAM7I,UAAU0I,GAAQH,EAAcG,GAAQrG,EAAMwG,MAAM9I,OAAO4I,GAC9GQ,EAAYZ,EAAcG,GAAQrG,EAAMwG,MAAM7I,UAAU0I,GACxDU,EAAoB/B,EAAgBiB,GACpCe,EAAaD,EAA6B,MAATV,EAAeU,EAAkBE,cAAgB,EAAIF,EAAkBG,aAAe,EAAI,EAC3HC,EAAoBN,EAAU,EAAIC,EAAY,EAG9CpF,EAAMmE,EAAcc,GACpBlF,EAAMuF,EAAaN,EAAUJ,GAAOT,EAAce,GAClDQ,EAASJ,EAAa,EAAIN,EAAUJ,GAAO,EAAIa,EAC/CE,EAAS1B,EAAOjE,EAAK0F,EAAQ3F,GAE7B6F,EAAWjB,EACfrG,EAAMmG,cAAcxG,KAASqG,EAAwB,CAAC,GAAyBsB,GAAYD,EAAQrB,EAAsBuB,aAAeF,EAASD,EAAQpB,EAnBzJ,CAoBF,EAkCEtF,OAhCF,SAAgBC,GACd,IAAIX,EAAQW,EAAMX,MAEdwH,EADU7G,EAAMG,QACWlC,QAC3BqH,OAAoC,IAArBuB,EAA8B,sBAAwBA,EAErD,MAAhBvB,IAKwB,iBAAjBA,IACTA,EAAejG,EAAME,SAASxC,OAAO+J,cAAcxB,MAOhDpC,EAAS7D,EAAME,SAASxC,OAAQuI,KAIrCjG,EAAME,SAASgB,MAAQ+E,EACzB,EASE5E,SAAU,CAAC,iBACXqG,iBAAkB,CAAC,oBCxFN,SAASC,EAAa5J,GACnC,OAAOA,EAAUwD,MAAM,KAAK,EAC9B,CCOA,IAAIqG,GAAa,CACf5G,IAAK,OACL9D,MAAO,OACPD,OAAQ,OACRE,KAAM,QAeD,SAAS0K,GAAYlH,GAC1B,IAAImH,EAEApK,EAASiD,EAAMjD,OACfqK,EAAapH,EAAMoH,WACnBhK,EAAY4C,EAAM5C,UAClBiK,EAAYrH,EAAMqH,UAClBC,EAAUtH,EAAMsH,QAChBpH,EAAWF,EAAME,SACjBqH,EAAkBvH,EAAMuH,gBACxBC,EAAWxH,EAAMwH,SACjBC,EAAezH,EAAMyH,aACrBC,EAAU1H,EAAM0H,QAChBC,EAAaL,EAAQ1E,EACrBA,OAAmB,IAAf+E,EAAwB,EAAIA,EAChCC,EAAaN,EAAQxE,EACrBA,OAAmB,IAAf8E,EAAwB,EAAIA,EAEhCC,EAAgC,mBAAjBJ,EAA8BA,EAAa,CAC5D7E,EAAGA,EACHE,IACG,CACHF,EAAGA,EACHE,GAGFF,EAAIiF,EAAMjF,EACVE,EAAI+E,EAAM/E,EACV,IAAIgF,EAAOR,EAAQrL,eAAe,KAC9B8L,EAAOT,EAAQrL,eAAe,KAC9B+L,EAAQxL,EACRyL,EAAQ,EACRC,EAAM5J,OAEV,GAAIkJ,EAAU,CACZ,IAAIpD,EAAeC,EAAgBtH,GAC/BoL,EAAa,eACbC,EAAY,cAEZhE,IAAiBhG,EAAUrB,IAGmB,WAA5C,EAFJqH,EAAeN,EAAmB/G,IAECmD,UAAsC,aAAbA,IAC1DiI,EAAa,eACbC,EAAY,gBAOZhL,IAAc,IAAQA,IAAcZ,GAAQY,IAAcb,IAAU8K,IAAczK,KACpFqL,EAAQ3L,EAGRwG,IAFc4E,GAAWtD,IAAiB8D,GAAOA,EAAIxF,eAAiBwF,EAAIxF,eAAeD,OACzF2B,EAAa+D,IACEf,EAAW3E,OAC1BK,GAAKyE,EAAkB,GAAK,GAG1BnK,IAAcZ,IAASY,IAAc,GAAOA,IAAcd,GAAW+K,IAAczK,KACrFoL,EAAQzL,EAGRqG,IAFc8E,GAAWtD,IAAiB8D,GAAOA,EAAIxF,eAAiBwF,EAAIxF,eAAeH,MACzF6B,EAAagE,IACEhB,EAAW7E,MAC1BK,GAAK2E,EAAkB,GAAK,EAEhC,CAEA,IAgBMc,EAhBFC,EAAe5M,OAAOkE,OAAO,CAC/BM,SAAUA,GACTsH,GAAYP,IAEXsB,GAAyB,IAAjBd,EAlFd,SAA2BrI,EAAM8I,GAC/B,IAAItF,EAAIxD,EAAKwD,EACTE,EAAI1D,EAAK0D,EACT0F,EAAMN,EAAIO,kBAAoB,EAClC,MAAO,CACL7F,EAAG5B,EAAM4B,EAAI4F,GAAOA,GAAO,EAC3B1F,EAAG9B,EAAM8B,EAAI0F,GAAOA,GAAO,EAE/B,CA0EsCE,CAAkB,CACpD9F,EAAGA,EACHE,GACC1E,EAAUrB,IAAW,CACtB6F,EAAGA,EACHE,GAMF,OAHAF,EAAI2F,EAAM3F,EACVE,EAAIyF,EAAMzF,EAENyE,EAGK7L,OAAOkE,OAAO,CAAC,EAAG0I,IAAeD,EAAiB,CAAC,GAAkBJ,GAASF,EAAO,IAAM,GAAIM,EAAeL,GAASF,EAAO,IAAM,GAAIO,EAAe5D,WAAayD,EAAIO,kBAAoB,IAAM,EAAI,aAAe7F,EAAI,OAASE,EAAI,MAAQ,eAAiBF,EAAI,OA
ASE,EAAI,SAAUuF,IAG5R3M,OAAOkE,OAAO,CAAC,EAAG0I,IAAenB,EAAkB,CAAC,GAAmBc,GAASF,EAAOjF,EAAI,KAAO,GAAIqE,EAAgBa,GAASF,EAAOlF,EAAI,KAAO,GAAIuE,EAAgB1C,UAAY,GAAI0C,GAC9L,CA4CA,UACEnI,KAAM,gBACNC,SAAS,EACTC,MAAO,cACPC,GA9CF,SAAuBwJ,GACrB,IAAItJ,EAAQsJ,EAAMtJ,MACdc,EAAUwI,EAAMxI,QAChByI,EAAwBzI,EAAQoH,gBAChCA,OAA4C,IAA1BqB,GAA0CA,EAC5DC,EAAoB1I,EAAQqH,SAC5BA,OAAiC,IAAtBqB,GAAsCA,EACjDC,EAAwB3I,EAAQsH,aAChCA,OAAyC,IAA1BqB,GAA0CA,EACzDR,EAAe,CACjBlL,UAAWuD,EAAiBtB,EAAMjC,WAClCiK,UAAWL,EAAa3H,EAAMjC,WAC9BL,OAAQsC,EAAME,SAASxC,OACvBqK,WAAY/H,EAAMwG,MAAM9I,OACxBwK,gBAAiBA,EACjBG,QAAoC,UAA3BrI,EAAMc,QAAQC,UAGgB,MAArCf,EAAMmG,cAAcD,gBACtBlG,EAAMK,OAAO3C,OAASrB,OAAOkE,OAAO,CAAC,EAAGP,EAAMK,OAAO3C,OAAQmK,GAAYxL,OAAOkE,OAAO,CAAC,EAAG0I,EAAc,CACvGhB,QAASjI,EAAMmG,cAAcD,cAC7BrF,SAAUb,EAAMc,QAAQC,SACxBoH,SAAUA,EACVC,aAAcA,OAIe,MAA7BpI,EAAMmG,cAAcjF,QACtBlB,EAAMK,OAAOa,MAAQ7E,OAAOkE,OAAO,CAAC,EAAGP,EAAMK,OAAOa,MAAO2G,GAAYxL,OAAOkE,OAAO,CAAC,EAAG0I,EAAc,CACrGhB,QAASjI,EAAMmG,cAAcjF,MAC7BL,SAAU,WACVsH,UAAU,EACVC,aAAcA,OAIlBpI,EAAMM,WAAW5C,OAASrB,OAAOkE,OAAO,CAAC,EAAGP,EAAMM,WAAW5C,OAAQ,CACnE,wBAAyBsC,EAAMjC,WAEnC,EAQE2L,KAAM,CAAC,GCrKT,IAAIC,GAAU,CACZA,SAAS,GAsCX,UACEhK,KAAM,iBACNC,SAAS,EACTC,MAAO,QACPC,GAAI,WAAe,EACnBY,OAxCF,SAAgBX,GACd,IAAIC,EAAQD,EAAKC,MACb4J,EAAW7J,EAAK6J,SAChB9I,EAAUf,EAAKe,QACf+I,EAAkB/I,EAAQgJ,OAC1BA,OAA6B,IAApBD,GAAoCA,EAC7CE,EAAkBjJ,EAAQkJ,OAC1BA,OAA6B,IAApBD,GAAoCA,EAC7C9K,EAASF,EAAUiB,EAAME,SAASxC,QAClCuM,EAAgB,GAAGjM,OAAOgC,EAAMiK,cAActM,UAAWqC,EAAMiK,cAAcvM,QAYjF,OAVIoM,GACFG,EAAc9J,SAAQ,SAAU+J,GAC9BA,EAAaC,iBAAiB,SAAUP,EAASQ,OAAQT,GAC3D,IAGEK,GACF/K,EAAOkL,iBAAiB,SAAUP,EAASQ,OAAQT,IAG9C,WACDG,GACFG,EAAc9J,SAAQ,SAAU+J,GAC9BA,EAAaG,oBAAoB,SAAUT,EAASQ,OAAQT,GAC9D,IAGEK,GACF/K,EAAOoL,oBAAoB,SAAUT,EAASQ,OAAQT,GAE1D,CACF,EASED,KAAM,CAAC,GC/CT,IAAIY,GAAO,CACTnN,KAAM,QACND,MAAO,OACPD,OAAQ,MACR+D,IAAK,UAEQ,SAASuJ,GAAqBxM,GAC3C,OAAOA,EAAUyM,QAAQ,0BAA0B,SAAUC,GAC3D,OAAOH,GAAKG,EACd,GACF,CCVA,IAAI,GAAO,CACTnN,MAAO,MACPC,IAAK,SAEQ,SAASmN,GAA8B3M,GACpD,OAAOA,EAAUyM,QAAQ,cAAc,SAAUC,GAC/C,OAAO,GAAKA,EACd,GACF,CCPe,SAASE,GAAgB3L,GACtC,IAAI6J,EAAM9J,EAAUC,GAGpB,MAAO,CACL4L,WAHe/B,EAAIgC,YAInBC,UAHcjC,EAAIkC,YAKtB,CCNe,SAASC,GAAoBpM,GAQ1C,OAAO+D,EAAsB8B,EAAmB7F,IAAUzB,KAAOwN,GAAgB/L,GAASgM,UAC5F,CCXe,SAASK,GAAerM,GAErC,IAAIsM,EAAoB,EAAiBtM,GACrCuM,EAAWD,EAAkBC,SAC7BC,EAAYF,EAAkBE,UAC9BC,EAAYH,EAAkBG,UAElC,MAAO,6BAA6B3I,KAAKyI,EAAWE,EAAYD,EAClE,CCLe,SAASE,GAAgBtM,GACtC,MAAI,CAAC,OAAQ,OAAQ,aAAawF,QAAQ7F,EAAYK,KAAU,EAEvDA,EAAKG,cAAcoM,KAGxBhM,EAAcP,IAASiM,GAAejM,GACjCA,EAGFsM,GAAgB1G,EAAc5F,GACvC,CCJe,SAASwM,GAAkB5M,EAAS6M,GACjD,IAAIC,OAES,IAATD,IACFA,EAAO,IAGT,IAAIvB,EAAeoB,GAAgB1M,GAC/B+M,EAASzB,KAAqE,OAAlDwB,EAAwB9M,EAAQO,oBAAyB,EAASuM,EAAsBH,MACpH1C,EAAM9J,EAAUmL,GAChB0B,EAASD,EAAS,CAAC9C,GAAK7K,OAAO6K,EAAIxF,gBAAkB,GAAI4H,GAAef,GAAgBA,EAAe,IAAMA,EAC7G2B,EAAcJ,EAAKzN,OAAO4N,GAC9B,OAAOD,EAASE,EAChBA,EAAY7N,OAAOwN,GAAkB5G,EAAcgH,IACrD,CCzBe,SAASE,GAAiBC,GACvC,OAAO1P,OAAOkE,OAAO,CAAC,EAAGwL,EAAM,CAC7B5O,KAAM4O,EAAKxI,EACXvC,IAAK+K,EAAKtI,EACVvG,MAAO6O,EAAKxI,EAAIwI,EAAK7I,MACrBjG,OAAQ8O,EAAKtI,EAAIsI,EAAK3I,QAE1B,CCqBA,SAAS4I,GAA2BpN,EAASqN,EAAgBlL,GAC3D,OAAOkL,IAAmBxO,EAAWqO,GCzBxB,SAAyBlN,EAASmC,GAC/C,IAAI8H,EAAM9J,EAAUH,GAChBsN,EAAOzH,EAAmB7F,GAC1ByE,EAAiBwF,EAAIxF,eACrBH,EAAQgJ,EAAKhF,YACb9D,EAAS8I,EAAKjF,aACd1D,EAAI,EACJE,EAAI,EAER,GAAIJ,EAAgB,CAClBH,EAAQG,EAAeH,MACvBE,EAASC,EAAeD,OACxB,IAAI+I,EAAiB1J,KAEjB0J,IAAmBA,GAA+B,UAAbpL,KACvCwC,EAAIF,EAAeG,WACnBC,EAAIJ,EAAeK,UAEvB,CAEA,MAAO,CACLR,MAAOA,EACPE,OAAQA,EACRG,EAAGA,EAAIyH,GAAoBpM,GAC3B6E,EAAGA,EAEP,CDDwD2I,CAAgBxN,EAASmC,IAAa1B,EAAU4M,GAdxG,SAA
oCrN,EAASmC,GAC3C,IAAIgL,EAAOpJ,EAAsB/D,GAAS,EAAoB,UAAbmC,GASjD,OARAgL,EAAK/K,IAAM+K,EAAK/K,IAAMpC,EAAQyN,UAC9BN,EAAK5O,KAAO4O,EAAK5O,KAAOyB,EAAQ0N,WAChCP,EAAK9O,OAAS8O,EAAK/K,IAAMpC,EAAQqI,aACjC8E,EAAK7O,MAAQ6O,EAAK5O,KAAOyB,EAAQsI,YACjC6E,EAAK7I,MAAQtE,EAAQsI,YACrB6E,EAAK3I,OAASxE,EAAQqI,aACtB8E,EAAKxI,EAAIwI,EAAK5O,KACd4O,EAAKtI,EAAIsI,EAAK/K,IACP+K,CACT,CAG0HQ,CAA2BN,EAAgBlL,GAAY+K,GEtBlK,SAAyBlN,GACtC,IAAI8M,EAEAQ,EAAOzH,EAAmB7F,GAC1B4N,EAAY7B,GAAgB/L,GAC5B2M,EAA0D,OAAlDG,EAAwB9M,EAAQO,oBAAyB,EAASuM,EAAsBH,KAChGrI,EAAQ,EAAIgJ,EAAKO,YAAaP,EAAKhF,YAAaqE,EAAOA,EAAKkB,YAAc,EAAGlB,EAAOA,EAAKrE,YAAc,GACvG9D,EAAS,EAAI8I,EAAKQ,aAAcR,EAAKjF,aAAcsE,EAAOA,EAAKmB,aAAe,EAAGnB,EAAOA,EAAKtE,aAAe,GAC5G1D,GAAKiJ,EAAU5B,WAAaI,GAAoBpM,GAChD6E,GAAK+I,EAAU1B,UAMnB,MAJiD,QAA7C,EAAiBS,GAAQW,GAAMS,YACjCpJ,GAAK,EAAI2I,EAAKhF,YAAaqE,EAAOA,EAAKrE,YAAc,GAAKhE,GAGrD,CACLA,MAAOA,EACPE,OAAQA,EACRG,EAAGA,EACHE,EAAGA,EAEP,CFCkMmJ,CAAgBnI,EAAmB7F,IACrO,CG1Be,SAASiO,GAAe9M,GACrC,IAOIkI,EAPAtK,EAAYoC,EAAKpC,UACjBiB,EAAUmB,EAAKnB,QACfb,EAAYgC,EAAKhC,UACjBqI,EAAgBrI,EAAYuD,EAAiBvD,GAAa,KAC1DiK,EAAYjK,EAAY4J,EAAa5J,GAAa,KAClD+O,EAAUnP,EAAU4F,EAAI5F,EAAUuF,MAAQ,EAAItE,EAAQsE,MAAQ,EAC9D6J,EAAUpP,EAAU8F,EAAI9F,EAAUyF,OAAS,EAAIxE,EAAQwE,OAAS,EAGpE,OAAQgD,GACN,KAAK,EACH6B,EAAU,CACR1E,EAAGuJ,EACHrJ,EAAG9F,EAAU8F,EAAI7E,EAAQwE,QAE3B,MAEF,KAAKnG,EACHgL,EAAU,CACR1E,EAAGuJ,EACHrJ,EAAG9F,EAAU8F,EAAI9F,EAAUyF,QAE7B,MAEF,KAAKlG,EACH+K,EAAU,CACR1E,EAAG5F,EAAU4F,EAAI5F,EAAUuF,MAC3BO,EAAGsJ,GAEL,MAEF,KAAK5P,EACH8K,EAAU,CACR1E,EAAG5F,EAAU4F,EAAI3E,EAAQsE,MACzBO,EAAGsJ,GAEL,MAEF,QACE9E,EAAU,CACR1E,EAAG5F,EAAU4F,EACbE,EAAG9F,EAAU8F,GAInB,IAAIuJ,EAAW5G,EAAgBV,EAAyBU,GAAiB,KAEzE,GAAgB,MAAZ4G,EAAkB,CACpB,IAAI1G,EAAmB,MAAb0G,EAAmB,SAAW,QAExC,OAAQhF,GACN,KAAK1K,EACH2K,EAAQ+E,GAAY/E,EAAQ+E,IAAarP,EAAU2I,GAAO,EAAI1H,EAAQ0H,GAAO,GAC7E,MAEF,KAAK/I,EACH0K,EAAQ+E,GAAY/E,EAAQ+E,IAAarP,EAAU2I,GAAO,EAAI1H,EAAQ0H,GAAO,GAKnF,CAEA,OAAO2B,CACT,CC3De,SAASgF,GAAejN,EAAOc,QAC5B,IAAZA,IACFA,EAAU,CAAC,GAGb,IAAIoM,EAAWpM,EACXqM,EAAqBD,EAASnP,UAC9BA,OAAmC,IAAvBoP,EAAgCnN,EAAMjC,UAAYoP,EAC9DC,EAAoBF,EAASnM,SAC7BA,OAAiC,IAAtBqM,EAA+BpN,EAAMe,SAAWqM,EAC3DC,EAAoBH,EAASI,SAC7BA,OAAiC,IAAtBD,EAA+B7P,EAAkB6P,EAC5DE,EAAwBL,EAASM,aACjCA,OAAyC,IAA1BD,EAAmC9P,EAAW8P,EAC7DE,EAAwBP,EAASQ,eACjCA,OAA2C,IAA1BD,EAAmC/P,EAAS+P,EAC7DE,EAAuBT,EAASU,YAChCA,OAAuC,IAAzBD,GAA0CA,EACxDE,EAAmBX,EAAS3G,QAC5BA,OAA+B,IAArBsH,EAA8B,EAAIA,EAC5ChI,EAAgBD,EAAsC,iBAAZW,EAAuBA,EAAUT,EAAgBS,EAASlJ,IACpGyQ,EAAaJ,IAAmBhQ,EAASC,EAAYD,EACrDqK,EAAa/H,EAAMwG,MAAM9I,OACzBkB,EAAUoB,EAAME,SAAS0N,EAAcE,EAAaJ,GACpDK,EJkBS,SAAyBnP,EAAS0O,EAAUE,EAAczM,GACvE,IAAIiN,EAAmC,oBAAbV,EAlB5B,SAA4B1O,GAC1B,IAAIpB,EAAkBgO,GAAkB5G,EAAchG,IAElDqP,EADoB,CAAC,WAAY,SAASzJ,QAAQ,EAAiB5F,GAASiC,WAAa,GACnDtB,EAAcX,GAAWoG,EAAgBpG,GAAWA,EAE9F,OAAKS,EAAU4O,GAKRzQ,EAAgBgI,QAAO,SAAUyG,GACtC,OAAO5M,EAAU4M,IAAmBpI,EAASoI,EAAgBgC,IAAmD,SAAhCtP,EAAYsN,EAC9F,IANS,EAOX,CAK6DiC,CAAmBtP,GAAW,GAAGZ,OAAOsP,GAC/F9P,EAAkB,GAAGQ,OAAOgQ,EAAqB,CAACR,IAClDW,EAAsB3Q,EAAgB,GACtC4Q,EAAe5Q,EAAgBK,QAAO,SAAUwQ,EAASpC,GAC3D,IAAIF,EAAOC,GAA2BpN,EAASqN,EAAgBlL,GAK/D,OAJAsN,EAAQrN,IAAM,EAAI+K,EAAK/K,IAAKqN,EAAQrN,KACpCqN,EAAQnR,MAAQ,EAAI6O,EAAK7O,MAAOmR,EAAQnR,OACxCmR,EAAQpR,OAAS,EAAI8O,EAAK9O,OAAQoR,EAAQpR,QAC1CoR,EAAQlR,KAAO,EAAI4O,EAAK5O,KAAMkR,EAAQlR,MAC/BkR,CACT,GAAGrC,GAA2BpN,EAASuP,EAAqBpN,IAK5D,OAJAqN,EAAalL,MAAQkL,EAAalR,MAAQkR,EAAajR,KACvDiR,EAAahL,OAASgL,EAAanR,OAASmR,EAAapN,IACzDoN,EAAa7K,EAAI6K,EAAajR,KAC9BiR,EAAa3K,EAAI2K,EAAapN,IACvBoN,CACT,CInC2BE,CAAgBjP,EAAUT,GAAWA,EAAUA,EAAQ2P,gBAAkB9J,EAAmBzE,EAAME,SAASxC,QAAS4P,EAAUE,EAAczM,GACjKyN,EAAsB7L,EAA
sB3C,EAAME,SAASvC,WAC3DuI,EAAgB2G,GAAe,CACjClP,UAAW6Q,EACX5P,QAASmJ,EACThH,SAAU,WACVhD,UAAWA,IAET0Q,EAAmB3C,GAAiBzP,OAAOkE,OAAO,CAAC,EAAGwH,EAAY7B,IAClEwI,EAAoBhB,IAAmBhQ,EAAS+Q,EAAmBD,EAGnEG,EAAkB,CACpB3N,IAAK+M,EAAmB/M,IAAM0N,EAAkB1N,IAAM6E,EAAc7E,IACpE/D,OAAQyR,EAAkBzR,OAAS8Q,EAAmB9Q,OAAS4I,EAAc5I,OAC7EE,KAAM4Q,EAAmB5Q,KAAOuR,EAAkBvR,KAAO0I,EAAc1I,KACvED,MAAOwR,EAAkBxR,MAAQ6Q,EAAmB7Q,MAAQ2I,EAAc3I,OAExE0R,EAAa5O,EAAMmG,cAAckB,OAErC,GAAIqG,IAAmBhQ,GAAUkR,EAAY,CAC3C,IAAIvH,EAASuH,EAAW7Q,GACxB1B,OAAO4D,KAAK0O,GAAiBxO,SAAQ,SAAUhE,GAC7C,IAAI0S,EAAW,CAAC3R,EAAOD,GAAQuH,QAAQrI,IAAQ,EAAI,GAAK,EACpDkK,EAAO,CAAC,EAAKpJ,GAAQuH,QAAQrI,IAAQ,EAAI,IAAM,IACnDwS,EAAgBxS,IAAQkL,EAAOhB,GAAQwI,CACzC,GACF,CAEA,OAAOF,CACT,CCyEA,UACEhP,KAAM,OACNC,SAAS,EACTC,MAAO,OACPC,GA5HF,SAAcC,GACZ,IAAIC,EAAQD,EAAKC,MACbc,EAAUf,EAAKe,QACfnB,EAAOI,EAAKJ,KAEhB,IAAIK,EAAMmG,cAAcxG,GAAMmP,MAA9B,CAoCA,IAhCA,IAAIC,EAAoBjO,EAAQkM,SAC5BgC,OAAsC,IAAtBD,GAAsCA,EACtDE,EAAmBnO,EAAQoO,QAC3BC,OAAoC,IAArBF,GAAqCA,EACpDG,EAA8BtO,EAAQuO,mBACtC9I,EAAUzF,EAAQyF,QAClB+G,EAAWxM,EAAQwM,SACnBE,EAAe1M,EAAQ0M,aACvBI,EAAc9M,EAAQ8M,YACtB0B,EAAwBxO,EAAQyO,eAChCA,OAA2C,IAA1BD,GAA0CA,EAC3DE,EAAwB1O,EAAQ0O,sBAChCC,EAAqBzP,EAAMc,QAAQ/C,UACnCqI,EAAgB9E,EAAiBmO,GAEjCJ,EAAqBD,IADHhJ,IAAkBqJ,GACqCF,EAjC/E,SAAuCxR,GACrC,GAAIuD,EAAiBvD,KAAeX,EAClC,MAAO,GAGT,IAAIsS,EAAoBnF,GAAqBxM,GAC7C,MAAO,CAAC2M,GAA8B3M,GAAY2R,EAAmBhF,GAA8BgF,GACrG,CA0B6IC,CAA8BF,GAA3E,CAAClF,GAAqBkF,KAChHG,EAAa,CAACH,GAAoBzR,OAAOqR,GAAoBxR,QAAO,SAAUC,EAAKC,GACrF,OAAOD,EAAIE,OAAOsD,EAAiBvD,KAAeX,ECvCvC,SAA8B4C,EAAOc,QAClC,IAAZA,IACFA,EAAU,CAAC,GAGb,IAAIoM,EAAWpM,EACX/C,EAAYmP,EAASnP,UACrBuP,EAAWJ,EAASI,SACpBE,EAAeN,EAASM,aACxBjH,EAAU2G,EAAS3G,QACnBgJ,EAAiBrC,EAASqC,eAC1BM,EAAwB3C,EAASsC,sBACjCA,OAAkD,IAA1BK,EAAmC,EAAgBA,EAC3E7H,EAAYL,EAAa5J,GACzB6R,EAAa5H,EAAYuH,EAAiB3R,EAAsBA,EAAoB4H,QAAO,SAAUzH,GACvG,OAAO4J,EAAa5J,KAAeiK,CACrC,IAAK3K,EACDyS,EAAoBF,EAAWpK,QAAO,SAAUzH,GAClD,OAAOyR,EAAsBhL,QAAQzG,IAAc,CACrD,IAEiC,IAA7B+R,EAAkBC,SACpBD,EAAoBF,GAItB,IAAII,EAAYF,EAAkBjS,QAAO,SAAUC,EAAKC,GAOtD,OANAD,EAAIC,GAAakP,GAAejN,EAAO,CACrCjC,UAAWA,EACXuP,SAAUA,EACVE,aAAcA,EACdjH,QAASA,IACRjF,EAAiBvD,IACbD,CACT,GAAG,CAAC,GACJ,OAAOzB,OAAO4D,KAAK+P,GAAWC,MAAK,SAAUC,EAAGC,GAC9C,OAAOH,EAAUE,GAAKF,EAAUG,EAClC,GACF,CDC6DC,CAAqBpQ,EAAO,CACnFjC,UAAWA,EACXuP,SAAUA,EACVE,aAAcA,EACdjH,QAASA,EACTgJ,eAAgBA,EAChBC,sBAAuBA,IACpBzR,EACP,GAAG,IACCsS,EAAgBrQ,EAAMwG,MAAM7I,UAC5BoK,EAAa/H,EAAMwG,MAAM9I,OACzB4S,EAAY,IAAIC,IAChBC,GAAqB,EACrBC,EAAwBb,EAAW,GAE9Bc,EAAI,EAAGA,EAAId,EAAWG,OAAQW,IAAK,CAC1C,IAAI3S,EAAY6R,EAAWc,GAEvBC,EAAiBrP,EAAiBvD,GAElC6S,EAAmBjJ,EAAa5J,KAAeT,EAC/CuT,EAAa,CAAC,EAAK5T,GAAQuH,QAAQmM,IAAmB,EACtDrK,EAAMuK,EAAa,QAAU,SAC7B1F,EAAW8B,GAAejN,EAAO,CACnCjC,UAAWA,EACXuP,SAAUA,EACVE,aAAcA,EACdI,YAAaA,EACbrH,QAASA,IAEPuK,EAAoBD,EAAaD,EAAmB1T,EAAQC,EAAOyT,EAAmB3T,EAAS,EAE/FoT,EAAc/J,GAAOyB,EAAWzB,KAClCwK,EAAoBvG,GAAqBuG,IAG3C,IAAIC,EAAmBxG,GAAqBuG,GACxCE,EAAS,GAUb,GARIhC,GACFgC,EAAOC,KAAK9F,EAASwF,IAAmB,GAGtCxB,GACF6B,EAAOC,KAAK9F,EAAS2F,IAAsB,EAAG3F,EAAS4F,IAAqB,GAG1EC,EAAOE,OAAM,SAAUC,GACzB,OAAOA,CACT,IAAI,CACFV,EAAwB1S,EACxByS,GAAqB,EACrB,KACF,CAEAF,EAAUc,IAAIrT,EAAWiT,EAC3B,CAEA,GAAIR,EAqBF,IAnBA,IAEIa,EAAQ,SAAeC,GACzB,IAAIC,EAAmB3B,EAAW4B,MAAK,SAAUzT,GAC/C,IAAIiT,EAASV,EAAU9T,IAAIuB,GAE3B,GAAIiT,EACF,OAAOA,EAAOS,MAAM,EAAGH,GAAIJ,OAAM,SAAUC,GACzC,OAAOA,CACT,GAEJ,IAEA,GAAII,EAEF,OADAd,EAAwBc,EACjB,OAEX,EAESD,EAnBY/B,EAAiB,EAAI,EAmBZ+B,EAAK,GAGpB,UAFFD,EAAMC,GADmBA,KAOpCtR,EAAMjC,YAAc0S,IACtBzQ,EAAMmG,cAAcxG,GAAMmP,OAAQ,EAClC9O,EAAMjC,UAAY0S,EAClBzQ,EAAM0R,OAAQ,EA5GhB,CA8GF,EAQEhK,iBAAkB,CAAC,UACnBgC,KAAM,CACJoF,O
AAO,IE7IX,SAAS6C,GAAexG,EAAUY,EAAM6F,GAQtC,YAPyB,IAArBA,IACFA,EAAmB,CACjBrO,EAAG,EACHE,EAAG,IAIA,CACLzC,IAAKmK,EAASnK,IAAM+K,EAAK3I,OAASwO,EAAiBnO,EACnDvG,MAAOiO,EAASjO,MAAQ6O,EAAK7I,MAAQ0O,EAAiBrO,EACtDtG,OAAQkO,EAASlO,OAAS8O,EAAK3I,OAASwO,EAAiBnO,EACzDtG,KAAMgO,EAAShO,KAAO4O,EAAK7I,MAAQ0O,EAAiBrO,EAExD,CAEA,SAASsO,GAAsB1G,GAC7B,MAAO,CAAC,EAAKjO,EAAOD,EAAQE,GAAM2U,MAAK,SAAUC,GAC/C,OAAO5G,EAAS4G,IAAS,CAC3B,GACF,CA+BA,UACEpS,KAAM,OACNC,SAAS,EACTC,MAAO,OACP6H,iBAAkB,CAAC,mBACnB5H,GAlCF,SAAcC,GACZ,IAAIC,EAAQD,EAAKC,MACbL,EAAOI,EAAKJ,KACZ0Q,EAAgBrQ,EAAMwG,MAAM7I,UAC5BoK,EAAa/H,EAAMwG,MAAM9I,OACzBkU,EAAmB5R,EAAMmG,cAAc6L,gBACvCC,EAAoBhF,GAAejN,EAAO,CAC5C0N,eAAgB,cAEdwE,EAAoBjF,GAAejN,EAAO,CAC5C4N,aAAa,IAEXuE,EAA2BR,GAAeM,EAAmB5B,GAC7D+B,EAAsBT,GAAeO,EAAmBnK,EAAY6J,GACpES,EAAoBR,GAAsBM,GAC1CG,EAAmBT,GAAsBO,GAC7CpS,EAAMmG,cAAcxG,GAAQ,CAC1BwS,yBAA0BA,EAC1BC,oBAAqBA,EACrBC,kBAAmBA,EACnBC,iBAAkBA,GAEpBtS,EAAMM,WAAW5C,OAASrB,OAAOkE,OAAO,CAAC,EAAGP,EAAMM,WAAW5C,OAAQ,CACnE,+BAAgC2U,EAChC,sBAAuBC,GAE3B,GCJA,IACE3S,KAAM,SACNC,SAAS,EACTC,MAAO,OACPwB,SAAU,CAAC,iBACXvB,GA5BF,SAAgBa,GACd,IAAIX,EAAQW,EAAMX,MACdc,EAAUH,EAAMG,QAChBnB,EAAOgB,EAAMhB,KACb4S,EAAkBzR,EAAQuG,OAC1BA,OAA6B,IAApBkL,EAA6B,CAAC,EAAG,GAAKA,EAC/C7I,EAAO,EAAW7L,QAAO,SAAUC,EAAKC,GAE1C,OADAD,EAAIC,GA5BD,SAAiCA,EAAWyI,EAAOa,GACxD,IAAIjB,EAAgB9E,EAAiBvD,GACjCyU,EAAiB,CAACrV,EAAM,GAAKqH,QAAQ4B,IAAkB,GAAK,EAAI,EAEhErG,EAAyB,mBAAXsH,EAAwBA,EAAOhL,OAAOkE,OAAO,CAAC,EAAGiG,EAAO,CACxEzI,UAAWA,KACPsJ,EACFoL,EAAW1S,EAAK,GAChB2S,EAAW3S,EAAK,GAIpB,OAFA0S,EAAWA,GAAY,EACvBC,GAAYA,GAAY,GAAKF,EACtB,CAACrV,EAAMD,GAAOsH,QAAQ4B,IAAkB,EAAI,CACjD7C,EAAGmP,EACHjP,EAAGgP,GACD,CACFlP,EAAGkP,EACHhP,EAAGiP,EAEP,CASqBC,CAAwB5U,EAAWiC,EAAMwG,MAAOa,GAC1DvJ,CACT,GAAG,CAAC,GACA8U,EAAwBlJ,EAAK1J,EAAMjC,WACnCwF,EAAIqP,EAAsBrP,EAC1BE,EAAImP,EAAsBnP,EAEW,MAArCzD,EAAMmG,cAAcD,gBACtBlG,EAAMmG,cAAcD,cAAc3C,GAAKA,EACvCvD,EAAMmG,cAAcD,cAAczC,GAAKA,GAGzCzD,EAAMmG,cAAcxG,GAAQ+J,CAC9B,GC1BA,IACE/J,KAAM,gBACNC,SAAS,EACTC,MAAO,OACPC,GApBF,SAAuBC,GACrB,IAAIC,EAAQD,EAAKC,MACbL,EAAOI,EAAKJ,KAKhBK,EAAMmG,cAAcxG,GAAQkN,GAAe,CACzClP,UAAWqC,EAAMwG,MAAM7I,UACvBiB,QAASoB,EAAMwG,MAAM9I,OACrBqD,SAAU,WACVhD,UAAWiC,EAAMjC,WAErB,EAQE2L,KAAM,CAAC,GCgHT,IACE/J,KAAM,kBACNC,SAAS,EACTC,MAAO,OACPC,GA/HF,SAAyBC,GACvB,IAAIC,EAAQD,EAAKC,MACbc,EAAUf,EAAKe,QACfnB,EAAOI,EAAKJ,KACZoP,EAAoBjO,EAAQkM,SAC5BgC,OAAsC,IAAtBD,GAAsCA,EACtDE,EAAmBnO,EAAQoO,QAC3BC,OAAoC,IAArBF,GAAsCA,EACrD3B,EAAWxM,EAAQwM,SACnBE,EAAe1M,EAAQ0M,aACvBI,EAAc9M,EAAQ8M,YACtBrH,EAAUzF,EAAQyF,QAClBsM,EAAkB/R,EAAQgS,OAC1BA,OAA6B,IAApBD,GAAoCA,EAC7CE,EAAwBjS,EAAQkS,aAChCA,OAAyC,IAA1BD,EAAmC,EAAIA,EACtD5H,EAAW8B,GAAejN,EAAO,CACnCsN,SAAUA,EACVE,aAAcA,EACdjH,QAASA,EACTqH,YAAaA,IAEXxH,EAAgB9E,EAAiBtB,EAAMjC,WACvCiK,EAAYL,EAAa3H,EAAMjC,WAC/BkV,GAAmBjL,EACnBgF,EAAWtH,EAAyBU,GACpC8I,ECrCY,MDqCSlC,ECrCH,IAAM,IDsCxB9G,EAAgBlG,EAAMmG,cAAcD,cACpCmK,EAAgBrQ,EAAMwG,MAAM7I,UAC5BoK,EAAa/H,EAAMwG,MAAM9I,OACzBwV,EAA4C,mBAAjBF,EAA8BA,EAAa3W,OAAOkE,OAAO,CAAC,EAAGP,EAAMwG,MAAO,CACvGzI,UAAWiC,EAAMjC,aACbiV,EACFG,EAA2D,iBAAtBD,EAAiC,CACxElG,SAAUkG,EACVhE,QAASgE,GACP7W,OAAOkE,OAAO,CAChByM,SAAU,EACVkC,QAAS,GACRgE,GACCE,EAAsBpT,EAAMmG,cAAckB,OAASrH,EAAMmG,cAAckB,OAAOrH,EAAMjC,WAAa,KACjG2L,EAAO,CACTnG,EAAG,EACHE,EAAG,GAGL,GAAKyC,EAAL,CAIA,GAAI8I,EAAe,CACjB,IAAIqE,EAEAC,EAAwB,MAAbtG,EAAmB,EAAM7P,EACpCoW,EAAuB,MAAbvG,EAAmB/P,EAASC,EACtCoJ,EAAmB,MAAb0G,EAAmB,SAAW,QACpC3F,EAASnB,EAAc8G,GACvBtL,EAAM2F,EAAS8D,EAASmI,GACxB7R,EAAM4F,EAAS8D,EAASoI,GACxBC,EAAWV,GAAU/K,EAAWzB,GAAO,EAAI,EAC3CmN,EAASzL,IAAc1K,EAAQ+S,EAAc/J,GAAOyB,EAAWzB,GAC/DoN,EAAS1L,IAAc1K,GAASyK,EAAWzB,IAAQ+J,EAAc/J,GAGj
EL,EAAejG,EAAME,SAASgB,MAC9BwF,EAAYoM,GAAU7M,EAAetC,EAAcsC,GAAgB,CACrE/C,MAAO,EACPE,OAAQ,GAENuQ,GAAqB3T,EAAMmG,cAAc,oBAAsBnG,EAAMmG,cAAc,oBAAoBI,QxBhFtG,CACLvF,IAAK,EACL9D,MAAO,EACPD,OAAQ,EACRE,KAAM,GwB6EFyW,GAAkBD,GAAmBL,GACrCO,GAAkBF,GAAmBJ,GAMrCO,GAAWnO,EAAO,EAAG0K,EAAc/J,GAAMI,EAAUJ,IACnDyN,GAAYd,EAAkB5C,EAAc/J,GAAO,EAAIkN,EAAWM,GAAWF,GAAkBT,EAA4BnG,SAAWyG,EAASK,GAAWF,GAAkBT,EAA4BnG,SACxMgH,GAAYf,GAAmB5C,EAAc/J,GAAO,EAAIkN,EAAWM,GAAWD,GAAkBV,EAA4BnG,SAAW0G,EAASI,GAAWD,GAAkBV,EAA4BnG,SACzMjG,GAAoB/G,EAAME,SAASgB,OAAS8D,EAAgBhF,EAAME,SAASgB,OAC3E+S,GAAelN,GAAiC,MAAbiG,EAAmBjG,GAAkBsF,WAAa,EAAItF,GAAkBuF,YAAc,EAAI,EAC7H4H,GAAwH,OAAjGb,EAA+C,MAAvBD,OAA8B,EAASA,EAAoBpG,IAAqBqG,EAAwB,EAEvJc,GAAY9M,EAAS2M,GAAYE,GACjCE,GAAkBzO,EAAOmN,EAAS,EAAQpR,EAF9B2F,EAAS0M,GAAYG,GAAsBD,IAEKvS,EAAK2F,EAAQyL,EAAS,EAAQrR,EAAK0S,IAAa1S,GAChHyE,EAAc8G,GAAYoH,GAC1B1K,EAAKsD,GAAYoH,GAAkB/M,CACrC,CAEA,GAAI8H,EAAc,CAChB,IAAIkF,GAEAC,GAAyB,MAAbtH,EAAmB,EAAM7P,EAErCoX,GAAwB,MAAbvH,EAAmB/P,EAASC,EAEvCsX,GAAUtO,EAAcgJ,GAExBuF,GAAmB,MAAZvF,EAAkB,SAAW,QAEpCwF,GAAOF,GAAUrJ,EAASmJ,IAE1BK,GAAOH,GAAUrJ,EAASoJ,IAE1BK,IAAuD,IAAxC,CAAC,EAAKzX,GAAMqH,QAAQ4B,GAEnCyO,GAAyH,OAAjGR,GAAgD,MAAvBjB,OAA8B,EAASA,EAAoBlE,IAAoBmF,GAAyB,EAEzJS,GAAaF,GAAeF,GAAOF,GAAUnE,EAAcoE,IAAQ1M,EAAW0M,IAAQI,GAAuB1B,EAA4BjE,QAEzI6F,GAAaH,GAAeJ,GAAUnE,EAAcoE,IAAQ1M,EAAW0M,IAAQI,GAAuB1B,EAA4BjE,QAAUyF,GAE5IK,GAAmBlC,GAAU8B,G1BzH9B,SAAwBlT,EAAK1E,EAAOyE,GACzC,IAAIwT,EAAItP,EAAOjE,EAAK1E,EAAOyE,GAC3B,OAAOwT,EAAIxT,EAAMA,EAAMwT,CACzB,C0BsHoDC,CAAeJ,GAAYN,GAASO,IAAcpP,EAAOmN,EAASgC,GAAaJ,GAAMF,GAAS1B,EAASiC,GAAaJ,IAEpKzO,EAAcgJ,GAAW8F,GACzBtL,EAAKwF,GAAW8F,GAAmBR,EACrC,CAEAxU,EAAMmG,cAAcxG,GAAQ+J,CAvE5B,CAwEF,EAQEhC,iBAAkB,CAAC,WE1HN,SAASyN,GAAiBC,EAAyBrQ,EAAcsD,QAC9D,IAAZA,IACFA,GAAU,GAGZ,ICnBoCrJ,ECJOJ,EFuBvCyW,EAA0B9V,EAAcwF,GACxCuQ,EAAuB/V,EAAcwF,IAf3C,SAAyBnG,GACvB,IAAImN,EAAOnN,EAAQ+D,wBACfI,EAASpB,EAAMoK,EAAK7I,OAAStE,EAAQqE,aAAe,EACpDD,EAASrB,EAAMoK,EAAK3I,QAAUxE,EAAQuE,cAAgB,EAC1D,OAAkB,IAAXJ,GAA2B,IAAXC,CACzB,CAU4DuS,CAAgBxQ,GACtEJ,EAAkBF,EAAmBM,GACrCgH,EAAOpJ,EAAsByS,EAAyBE,EAAsBjN,GAC5EyB,EAAS,CACXc,WAAY,EACZE,UAAW,GAET7C,EAAU,CACZ1E,EAAG,EACHE,EAAG,GAkBL,OAfI4R,IAA4BA,IAA4BhN,MACxB,SAA9B1J,EAAYoG,IAChBkG,GAAetG,MACbmF,GCnCgC9K,EDmCT+F,KClCdhG,EAAUC,IAAUO,EAAcP,GCJxC,CACL4L,YAFyChM,EDQbI,GCNR4L,WACpBE,UAAWlM,EAAQkM,WDGZH,GAAgB3L,IDoCnBO,EAAcwF,KAChBkD,EAAUtF,EAAsBoC,GAAc,IACtCxB,GAAKwB,EAAauH,WAC1BrE,EAAQxE,GAAKsB,EAAasH,WACjB1H,IACTsD,EAAQ1E,EAAIyH,GAAoBrG,KAI7B,CACLpB,EAAGwI,EAAK5O,KAAO2M,EAAOc,WAAa3C,EAAQ1E,EAC3CE,EAAGsI,EAAK/K,IAAM8I,EAAOgB,UAAY7C,EAAQxE,EACzCP,MAAO6I,EAAK7I,MACZE,OAAQ2I,EAAK3I,OAEjB,CGvDA,SAASoS,GAAMC,GACb,IAAItT,EAAM,IAAIoO,IACVmF,EAAU,IAAIC,IACdC,EAAS,GAKb,SAAS3F,EAAK4F,GACZH,EAAQI,IAAID,EAASlW,MACN,GAAG3B,OAAO6X,EAASxU,UAAY,GAAIwU,EAASnO,kBAAoB,IACtEvH,SAAQ,SAAU4V,GACzB,IAAKL,EAAQM,IAAID,GAAM,CACrB,IAAIE,EAAc9T,EAAI3F,IAAIuZ,GAEtBE,GACFhG,EAAKgG,EAET,CACF,IACAL,EAAO3E,KAAK4E,EACd,CAQA,OAzBAJ,EAAUtV,SAAQ,SAAU0V,GAC1B1T,EAAIiP,IAAIyE,EAASlW,KAAMkW,EACzB,IAiBAJ,EAAUtV,SAAQ,SAAU0V,GACrBH,EAAQM,IAAIH,EAASlW,OAExBsQ,EAAK4F,EAET,IACOD,CACT,CCvBA,IAAIM,GAAkB,CACpBnY,UAAW,SACX0X,UAAW,GACX1U,SAAU,YAGZ,SAASoV,KACP,IAAK,IAAI1B,EAAO2B,UAAUrG,OAAQsG,EAAO,IAAIpU,MAAMwS,GAAO6B,EAAO,EAAGA,EAAO7B,EAAM6B,IAC/ED,EAAKC,GAAQF,UAAUE,GAGzB,OAAQD,EAAKvE,MAAK,SAAUlT,GAC1B,QAASA,GAAoD,mBAAlCA,EAAQ+D,sBACrC,GACF,CAEO,SAAS4T,GAAgBC,QACL,IAArBA,IACFA,EAAmB,CAAC,GAGtB,IAAIC,EAAoBD,EACpBE,EAAwBD,EAAkBE,iBAC1CA,OAA6C,IAA1BD,EAAmC,GAAKA,EAC3DE,EAAyBH,EAAkBI,eAC3CA,OAA4C,IAA3BD,EAAoCV,GAAkBU,EAC3E,OAAO,SAAsBjZ,EAAWD,EAAQoD,QAC9B,IAAZA,IACFA,EAAU+V,GAGZ,ICxC6B
/W,EAC3BgX,EDuCE9W,EAAQ,CACVjC,UAAW,SACXgZ,iBAAkB,GAClBjW,QAASzE,OAAOkE,OAAO,CAAC,EAAG2V,GAAiBW,GAC5C1Q,cAAe,CAAC,EAChBjG,SAAU,CACRvC,UAAWA,EACXD,OAAQA,GAEV4C,WAAY,CAAC,EACbD,OAAQ,CAAC,GAEP2W,EAAmB,GACnBC,GAAc,EACdrN,EAAW,CACb5J,MAAOA,EACPkX,WAAY,SAAoBC,GAC9B,IAAIrW,EAAsC,mBAArBqW,EAAkCA,EAAiBnX,EAAMc,SAAWqW,EACzFC,IACApX,EAAMc,QAAUzE,OAAOkE,OAAO,CAAC,EAAGsW,EAAgB7W,EAAMc,QAASA,GACjEd,EAAMiK,cAAgB,CACpBtM,UAAW0B,EAAU1B,GAAa6N,GAAkB7N,GAAaA,EAAU4Q,eAAiB/C,GAAkB7N,EAAU4Q,gBAAkB,GAC1I7Q,OAAQ8N,GAAkB9N,IAI5B,IElE4B+X,EAC9B4B,EFiEMN,EDhCG,SAAwBtB,GAErC,IAAIsB,EAAmBvB,GAAMC,GAE7B,OAAO/W,EAAeb,QAAO,SAAUC,EAAK+B,GAC1C,OAAO/B,EAAIE,OAAO+Y,EAAiBvR,QAAO,SAAUqQ,GAClD,OAAOA,EAAShW,QAAUA,CAC5B,IACF,GAAG,GACL,CCuB+ByX,EElEK7B,EFkEsB,GAAGzX,OAAO2Y,EAAkB3W,EAAMc,QAAQ2U,WEjE9F4B,EAAS5B,EAAU5X,QAAO,SAAUwZ,EAAQE,GAC9C,IAAIC,EAAWH,EAAOE,EAAQ5X,MAK9B,OAJA0X,EAAOE,EAAQ5X,MAAQ6X,EAAWnb,OAAOkE,OAAO,CAAC,EAAGiX,EAAUD,EAAS,CACrEzW,QAASzE,OAAOkE,OAAO,CAAC,EAAGiX,EAAS1W,QAASyW,EAAQzW,SACrD4I,KAAMrN,OAAOkE,OAAO,CAAC,EAAGiX,EAAS9N,KAAM6N,EAAQ7N,QAC5C6N,EACEF,CACT,GAAG,CAAC,GAEGhb,OAAO4D,KAAKoX,GAAQlV,KAAI,SAAUhG,GACvC,OAAOkb,EAAOlb,EAChB,MF4DM,OAJA6D,EAAM+W,iBAAmBA,EAAiBvR,QAAO,SAAUiS,GACzD,OAAOA,EAAE7X,OACX,IA+FFI,EAAM+W,iBAAiB5W,SAAQ,SAAUJ,GACvC,IAAIJ,EAAOI,EAAKJ,KACZ+X,EAAe3X,EAAKe,QACpBA,OAA2B,IAAjB4W,EAA0B,CAAC,EAAIA,EACzChX,EAASX,EAAKW,OAElB,GAAsB,mBAAXA,EAAuB,CAChC,IAAIiX,EAAYjX,EAAO,CACrBV,MAAOA,EACPL,KAAMA,EACNiK,SAAUA,EACV9I,QAASA,IAKXkW,EAAiB/F,KAAK0G,GAFT,WAAmB,EAGlC,CACF,IA/GS/N,EAASQ,QAClB,EAMAwN,YAAa,WACX,IAAIX,EAAJ,CAIA,IAAIY,EAAkB7X,EAAME,SACxBvC,EAAYka,EAAgBla,UAC5BD,EAASma,EAAgBna,OAG7B,GAAKyY,GAAiBxY,EAAWD,GAAjC,CAKAsC,EAAMwG,MAAQ,CACZ7I,UAAWwX,GAAiBxX,EAAWqH,EAAgBtH,GAAoC,UAA3BsC,EAAMc,QAAQC,UAC9ErD,OAAQiG,EAAcjG,IAOxBsC,EAAM0R,OAAQ,EACd1R,EAAMjC,UAAYiC,EAAMc,QAAQ/C,UAKhCiC,EAAM+W,iBAAiB5W,SAAQ,SAAU0V,GACvC,OAAO7V,EAAMmG,cAAc0P,EAASlW,MAAQtD,OAAOkE,OAAO,CAAC,EAAGsV,EAASnM,KACzE,IAEA,IAAK,IAAIoO,EAAQ,EAAGA,EAAQ9X,EAAM+W,iBAAiBhH,OAAQ+H,IACzD,IAAoB,IAAhB9X,EAAM0R,MAAV,CAMA,IAAIqG,EAAwB/X,EAAM+W,iBAAiBe,GAC/ChY,EAAKiY,EAAsBjY,GAC3BkY,EAAyBD,EAAsBjX,QAC/CoM,OAAsC,IAA3B8K,EAAoC,CAAC,EAAIA,EACpDrY,EAAOoY,EAAsBpY,KAEf,mBAAPG,IACTE,EAAQF,EAAG,CACTE,MAAOA,EACPc,QAASoM,EACTvN,KAAMA,EACNiK,SAAUA,KACN5J,EAdR,MAHEA,EAAM0R,OAAQ,EACdoG,GAAS,CAzBb,CATA,CAqDF,EAGA1N,QC1I2BtK,ED0IV,WACf,OAAO,IAAImY,SAAQ,SAAUC,GAC3BtO,EAASgO,cACTM,EAAQlY,EACV,GACF,EC7IG,WAUL,OATK8W,IACHA,EAAU,IAAImB,SAAQ,SAAUC,GAC9BD,QAAQC,UAAUC,MAAK,WACrBrB,OAAUsB,EACVF,EAAQpY,IACV,GACF,KAGKgX,CACT,GDmIIuB,QAAS,WACPjB,IACAH,GAAc,CAChB,GAGF,IAAKd,GAAiBxY,EAAWD,GAC/B,OAAOkM,EAmCT,SAASwN,IACPJ,EAAiB7W,SAAQ,SAAUL,GACjC,OAAOA,GACT,IACAkX,EAAmB,EACrB,CAEA,OAvCApN,EAASsN,WAAWpW,GAASqX,MAAK,SAAUnY,IACrCiX,GAAenW,EAAQwX,eAC1BxX,EAAQwX,cAActY,EAE1B,IAmCO4J,CACT,CACF,CACO,IAAI2O,GAA4BhC,KGzLnC,GAA4BA,GAAgB,CAC9CI,iBAFqB,CAAC6B,GAAgB,GAAe,GAAe,EAAa,GAAQ,GAAM,GAAiB,EAAO,MCJrH,GAA4BjC,GAAgB,CAC9CI,iBAFqB,CAAC6B,GAAgB,GAAe,GAAe,KCatE,MAAMC,GAAa,IAAIlI,IACjBmI,GAAO,CACX,GAAAtH,CAAIxS,EAASzC,EAAKyN,GACX6O,GAAWzC,IAAIpX,IAClB6Z,GAAWrH,IAAIxS,EAAS,IAAI2R,KAE9B,MAAMoI,EAAcF,GAAWjc,IAAIoC,GAI9B+Z,EAAY3C,IAAI7Z,IAA6B,IAArBwc,EAAYC,KAKzCD,EAAYvH,IAAIjV,EAAKyN,GAHnBiP,QAAQC,MAAM,+EAA+E7W,MAAM8W,KAAKJ,EAAY1Y,QAAQ,MAIhI,EACAzD,IAAG,CAACoC,EAASzC,IACPsc,GAAWzC,IAAIpX,IACV6Z,GAAWjc,IAAIoC,GAASpC,IAAIL,IAE9B,KAET,MAAA6c,CAAOpa,EAASzC,GACd,IAAKsc,GAAWzC,IAAIpX,GAClB,OAEF,MAAM+Z,EAAcF,GAAWjc,IAAIoC,GACnC+Z,EAAYM,OAAO9c,GAGM,IAArBwc,EAAYC,MACdH,GAAWQ,OAAOra,EAEtB,GAYIsa,GAAiB,gBAOjBC,GAAgBC,IAChBA,GAAYna,OAAOoa,KAAOpa,OAAOoa,IAAIC,SAEvCF,EAAWA,EAAS5O,QAAQ,iBAAiB,CAAC+O,EAAOC,IAAO,IA
AIH,IAAIC,OAAOE,QAEtEJ,GA4CHK,GAAuB7a,IAC3BA,EAAQ8a,cAAc,IAAIC,MAAMT,IAAgB,EAE5C,GAAYU,MACXA,GAA4B,iBAAXA,UAGO,IAAlBA,EAAOC,SAChBD,EAASA,EAAO,SAEgB,IAApBA,EAAOE,UAEjBC,GAAaH,GAEb,GAAUA,GACLA,EAAOC,OAASD,EAAO,GAAKA,EAEf,iBAAXA,GAAuBA,EAAO7J,OAAS,EACzCrL,SAAS+C,cAAc0R,GAAcS,IAEvC,KAEHI,GAAYpb,IAChB,IAAK,GAAUA,IAAgD,IAApCA,EAAQqb,iBAAiBlK,OAClD,OAAO,EAET,MAAMmK,EAAgF,YAA7D5V,iBAAiB1F,GAASub,iBAAiB,cAE9DC,EAAgBxb,EAAQyb,QAAQ,uBACtC,IAAKD,EACH,OAAOF,EAET,GAAIE,IAAkBxb,EAAS,CAC7B,MAAM0b,EAAU1b,EAAQyb,QAAQ,WAChC,GAAIC,GAAWA,EAAQlW,aAAegW,EACpC,OAAO,EAET,GAAgB,OAAZE,EACF,OAAO,CAEX,CACA,OAAOJ,CAAgB,EAEnBK,GAAa3b,IACZA,GAAWA,EAAQkb,WAAaU,KAAKC,gBAGtC7b,EAAQ8b,UAAU7W,SAAS,mBAGC,IAArBjF,EAAQ+b,SACV/b,EAAQ+b,SAEV/b,EAAQgc,aAAa,aAAoD,UAArChc,EAAQic,aAAa,aAE5DC,GAAiBlc,IACrB,IAAK8F,SAASC,gBAAgBoW,aAC5B,OAAO,KAIT,GAAmC,mBAAxBnc,EAAQqF,YAA4B,CAC7C,MAAM+W,EAAOpc,EAAQqF,cACrB,OAAO+W,aAAgBtb,WAAasb,EAAO,IAC7C,CACA,OAAIpc,aAAmBc,WACdd,EAIJA,EAAQwF,WAGN0W,GAAelc,EAAQwF,YAFrB,IAEgC,EAErC6W,GAAO,OAUPC,GAAStc,IACbA,EAAQuE,YAAY,EAEhBgY,GAAY,IACZlc,OAAOmc,SAAW1W,SAAS6G,KAAKqP,aAAa,qBACxC3b,OAAOmc,OAET,KAEHC,GAA4B,GAgB5BC,GAAQ,IAAuC,QAAjC5W,SAASC,gBAAgB4W,IACvCC,GAAqBC,IAhBAC,QAiBN,KACjB,MAAMC,EAAIR,KAEV,GAAIQ,EAAG,CACL,MAAMhc,EAAO8b,EAAOG,KACdC,EAAqBF,EAAE7b,GAAGH,GAChCgc,EAAE7b,GAAGH,GAAQ8b,EAAOK,gBACpBH,EAAE7b,GAAGH,GAAMoc,YAAcN,EACzBE,EAAE7b,GAAGH,GAAMqc,WAAa,KACtBL,EAAE7b,GAAGH,GAAQkc,EACNJ,EAAOK,gBAElB,GA5B0B,YAAxBpX,SAASuX,YAENZ,GAA0BtL,QAC7BrL,SAASyF,iBAAiB,oBAAoB,KAC5C,IAAK,MAAMuR,KAAYL,GACrBK,GACF,IAGJL,GAA0BpK,KAAKyK,IAE/BA,GAkBA,EAEEQ,GAAU,CAACC,EAAkB9F,EAAO,GAAI+F,EAAeD,IACxB,mBAArBA,EAAkCA,KAAoB9F,GAAQ+F,EAExEC,GAAyB,CAACX,EAAUY,EAAmBC,GAAoB,KAC/E,IAAKA,EAEH,YADAL,GAAQR,GAGV,MACMc,EA/JiC5d,KACvC,IAAKA,EACH,OAAO,EAIT,IAAI,mBACF6d,EAAkB,gBAClBC,GACEzd,OAAOqF,iBAAiB1F,GAC5B,MAAM+d,EAA0BC,OAAOC,WAAWJ,GAC5CK,EAAuBF,OAAOC,WAAWH,GAG/C,OAAKC,GAA4BG,GAKjCL,EAAqBA,EAAmBlb,MAAM,KAAK,GACnDmb,EAAkBA,EAAgBnb,MAAM,KAAK,GAtDf,KAuDtBqb,OAAOC,WAAWJ,GAAsBG,OAAOC,WAAWH,KANzD,CAMoG,EA0IpFK,CAAiCT,GADlC,EAExB,IAAIU,GAAS,EACb,MAAMC,EAAU,EACdrR,aAEIA,IAAW0Q,IAGfU,GAAS,EACTV,EAAkBjS,oBAAoB6O,GAAgB+D,GACtDf,GAAQR,GAAS,EAEnBY,EAAkBnS,iBAAiB+O,GAAgB+D,GACnDC,YAAW,KACJF,GACHvD,GAAqB6C,EACvB,GACCE,EAAiB,EAYhBW,GAAuB,CAAC1R,EAAM2R,EAAeC,EAAeC,KAChE,MAAMC,EAAa9R,EAAKsE,OACxB,IAAI+H,EAAQrM,EAAKjH,QAAQ4Y,GAIzB,OAAe,IAAXtF,GACMuF,GAAiBC,EAAiB7R,EAAK8R,EAAa,GAAK9R,EAAK,IAExEqM,GAASuF,EAAgB,GAAK,EAC1BC,IACFxF,GAASA,EAAQyF,GAAcA,GAE1B9R,EAAKjK,KAAKC,IAAI,EAAGD,KAAKE,IAAIoW,EAAOyF,EAAa,KAAI,EAerDC,GAAiB,qBACjBC,GAAiB,OACjBC,GAAgB,SAChBC,GAAgB,CAAC,EACvB,IAAIC,GAAW,EACf,MAAMC,GAAe,CACnBC,WAAY,YACZC,WAAY,YAERC,GAAe,IAAIrI,IAAI,CAAC,QAAS,WAAY,UAAW,YAAa,cAAe,aAAc,iBAAkB,YAAa,WAAY,YAAa,cAAe,YAAa,UAAW,WAAY,QAAS,oBAAqB,aAAc,YAAa,WAAY,cAAe,cAAe,cAAe,YAAa,eAAgB,gBAAiB,eAAgB,gBAAiB,aAAc,QAAS,OAAQ,SAAU,QAAS,SAAU,SAAU,UAAW,WAAY,OAAQ,SAAU,eAAgB,SAAU,OAAQ,mBAAoB,mBAAoB,QAAS,QAAS,WAM/lB,SAASsI,GAAarf,EAASsf,GAC7B,OAAOA,GAAO,GAAGA,MAAQN,QAAgBhf,EAAQgf,UAAYA,IAC/D,CACA,SAASO,GAAiBvf,GACxB,MAAMsf,EAAMD,GAAarf,GAGzB,OAFAA,EAAQgf,SAAWM,EACnBP,GAAcO,GAAOP,GAAcO,IAAQ,CAAC,EACrCP,GAAcO,EACvB,CAiCA,SAASE,GAAYC,EAAQC,EAAUC,EAAqB,MAC1D,OAAOliB,OAAOmiB,OAAOH,GAAQ7M,MAAKiN,GAASA,EAAMH,WAAaA,GAAYG,EAAMF,qBAAuBA,GACzG,CACA,SAASG,GAAoBC,EAAmB1B,EAAS2B,GACvD,MAAMC,EAAiC,iBAAZ5B,EAErBqB,EAAWO,EAAcD,EAAqB3B,GAAW2B,EAC/D,IAAIE,EAAYC,GAAaJ,GAI7B,OAHKX,GAAahI,IAAI8I,KACpBA,EAAYH,GAEP,CAACE,EAAaP,EAAUQ,EACjC,CACA,SAASE,GAAWpgB,EAAS+f,EAAmB1B,EAAS2B,EAAoBK,GAC3E,GAAiC,iBAAtBN,IAAmC/f,EAC5C,OAEF,IAAKigB,EAAaP,EAAUQ,GAAaJ,GAAoBC,EAAmB1B,EAAS2B,GAIzF,GAAID,KAAqBd,GAAc,CACrC,MAAMqB,EA
Aepf,GACZ,SAAU2e,GACf,IAAKA,EAAMU,eAAiBV,EAAMU,gBAAkBV,EAAMW,iBAAmBX,EAAMW,eAAevb,SAAS4a,EAAMU,eAC/G,OAAOrf,EAAGjD,KAAKwiB,KAAMZ,EAEzB,EAEFH,EAAWY,EAAaZ,EAC1B,CACA,MAAMD,EAASF,GAAiBvf,GAC1B0gB,EAAWjB,EAAOS,KAAeT,EAAOS,GAAa,CAAC,GACtDS,EAAmBnB,GAAYkB,EAAUhB,EAAUO,EAAc5B,EAAU,MACjF,GAAIsC,EAEF,YADAA,EAAiBN,OAASM,EAAiBN,QAAUA,GAGvD,MAAMf,EAAMD,GAAaK,EAAUK,EAAkBnU,QAAQgT,GAAgB,KACvE1d,EAAK+e,EA5Db,SAAoCjgB,EAASwa,EAAUtZ,GACrD,OAAO,SAASmd,EAAQwB,GACtB,MAAMe,EAAc5gB,EAAQ6gB,iBAAiBrG,GAC7C,IAAK,IAAI,OACPxN,GACE6S,EAAO7S,GAAUA,IAAWyT,KAAMzT,EAASA,EAAOxH,WACpD,IAAK,MAAMsb,KAAcF,EACvB,GAAIE,IAAe9T,EASnB,OANA+T,GAAWlB,EAAO,CAChBW,eAAgBxT,IAEdqR,EAAQgC,QACVW,GAAaC,IAAIjhB,EAAS6f,EAAMqB,KAAM1G,EAAUtZ,GAE3CA,EAAGigB,MAAMnU,EAAQ,CAAC6S,GAG/B,CACF,CAwC2BuB,CAA2BphB,EAASqe,EAASqB,GAvExE,SAA0B1f,EAASkB,GACjC,OAAO,SAASmd,EAAQwB,GAOtB,OANAkB,GAAWlB,EAAO,CAChBW,eAAgBxgB,IAEdqe,EAAQgC,QACVW,GAAaC,IAAIjhB,EAAS6f,EAAMqB,KAAMhgB,GAEjCA,EAAGigB,MAAMnhB,EAAS,CAAC6f,GAC5B,CACF,CA6DoFwB,CAAiBrhB,EAAS0f,GAC5Gxe,EAAGye,mBAAqBM,EAAc5B,EAAU,KAChDnd,EAAGwe,SAAWA,EACdxe,EAAGmf,OAASA,EACZnf,EAAG8d,SAAWM,EACdoB,EAASpB,GAAOpe,EAChBlB,EAAQuL,iBAAiB2U,EAAWhf,EAAI+e,EAC1C,CACA,SAASqB,GAActhB,EAASyf,EAAQS,EAAW7B,EAASsB,GAC1D,MAAMze,EAAKse,GAAYC,EAAOS,GAAY7B,EAASsB,GAC9Cze,IAGLlB,EAAQyL,oBAAoByU,EAAWhf,EAAIqgB,QAAQ5B,WAC5CF,EAAOS,GAAWhf,EAAG8d,UAC9B,CACA,SAASwC,GAAyBxhB,EAASyf,EAAQS,EAAWuB,GAC5D,MAAMC,EAAoBjC,EAAOS,IAAc,CAAC,EAChD,IAAK,MAAOyB,EAAY9B,KAAUpiB,OAAOmkB,QAAQF,GAC3CC,EAAWE,SAASJ,IACtBH,GAActhB,EAASyf,EAAQS,EAAWL,EAAMH,SAAUG,EAAMF,mBAGtE,CACA,SAASQ,GAAaN,GAGpB,OADAA,EAAQA,EAAMjU,QAAQiT,GAAgB,IAC/BI,GAAaY,IAAUA,CAChC,CACA,MAAMmB,GAAe,CACnB,EAAAc,CAAG9hB,EAAS6f,EAAOxB,EAAS2B,GAC1BI,GAAWpgB,EAAS6f,EAAOxB,EAAS2B,GAAoB,EAC1D,EACA,GAAA+B,CAAI/hB,EAAS6f,EAAOxB,EAAS2B,GAC3BI,GAAWpgB,EAAS6f,EAAOxB,EAAS2B,GAAoB,EAC1D,EACA,GAAAiB,CAAIjhB,EAAS+f,EAAmB1B,EAAS2B,GACvC,GAAiC,iBAAtBD,IAAmC/f,EAC5C,OAEF,MAAOigB,EAAaP,EAAUQ,GAAaJ,GAAoBC,EAAmB1B,EAAS2B,GACrFgC,EAAc9B,IAAcH,EAC5BN,EAASF,GAAiBvf,GAC1B0hB,EAAoBjC,EAAOS,IAAc,CAAC,EAC1C+B,EAAclC,EAAkBmC,WAAW,KACjD,QAAwB,IAAbxC,EAAX,CAQA,GAAIuC,EACF,IAAK,MAAME,KAAgB1kB,OAAO4D,KAAKoe,GACrC+B,GAAyBxhB,EAASyf,EAAQ0C,EAAcpC,EAAkBlN,MAAM,IAGpF,IAAK,MAAOuP,EAAavC,KAAUpiB,OAAOmkB,QAAQF,GAAoB,CACpE,MAAMC,EAAaS,EAAYxW,QAAQkT,GAAe,IACjDkD,IAAejC,EAAkB8B,SAASF,IAC7CL,GAActhB,EAASyf,EAAQS,EAAWL,EAAMH,SAAUG,EAAMF,mBAEpE,CAXA,KAPA,CAEE,IAAKliB,OAAO4D,KAAKqgB,GAAmBvQ,OAClC,OAEFmQ,GAActhB,EAASyf,EAAQS,EAAWR,EAAUO,EAAc5B,EAAU,KAE9E,CAYF,EACA,OAAAgE,CAAQriB,EAAS6f,EAAOpI,GACtB,GAAqB,iBAAVoI,IAAuB7f,EAChC,OAAO,KAET,MAAM+c,EAAIR,KAGV,IAAI+F,EAAc,KACdC,GAAU,EACVC,GAAiB,EACjBC,GAAmB,EAJH5C,IADFM,GAAaN,IAMZ9C,IACjBuF,EAAcvF,EAAEhC,MAAM8E,EAAOpI,GAC7BsF,EAAE/c,GAASqiB,QAAQC,GACnBC,GAAWD,EAAYI,uBACvBF,GAAkBF,EAAYK,gCAC9BF,EAAmBH,EAAYM,sBAEjC,MAAMC,EAAM9B,GAAW,IAAIhG,MAAM8E,EAAO,CACtC0C,UACAO,YAAY,IACVrL,GAUJ,OATIgL,GACFI,EAAIE,iBAEFP,GACFxiB,EAAQ8a,cAAc+H,GAEpBA,EAAIJ,kBAAoBH,GAC1BA,EAAYS,iBAEPF,CACT,GAEF,SAAS9B,GAAWljB,EAAKmlB,EAAO,CAAC,GAC/B,IAAK,MAAOzlB,EAAKa,KAAUX,OAAOmkB,QAAQoB,GACxC,IACEnlB,EAAIN,GAAOa,CACb,CAAE,MAAO6kB,GACPxlB,OAAOC,eAAeG,EAAKN,EAAK,CAC9B2lB,cAAc,EACdtlB,IAAG,IACMQ,GAGb,CAEF,OAAOP,CACT,CASA,SAASslB,GAAc/kB,GACrB,GAAc,SAAVA,EACF,OAAO,EAET,GAAc,UAAVA,EACF,OAAO,EAET,GAAIA,IAAU4f,OAAO5f,GAAOkC,WAC1B,OAAO0d,OAAO5f,GAEhB,GAAc,KAAVA,GAA0B,SAAVA,EAClB,OAAO,KAET,GAAqB,iBAAVA,EACT,OAAOA,EAET,IACE,OAAOglB,KAAKC,MAAMC,mBAAmBllB,GACvC,CAAE,MAAO6kB,GACP,OAAO7kB,CACT,CACF,CACA,SAASmlB,GAAiBhmB,GACxB,OAAOA,EAAIqO,QAAQ,UAAU4X,GAAO,IAAIA,EAAItjB,iBAC9C,CACA,MAAMujB,GAAc,CAClB,gBAAAC,CAAiB1jB,EAASzC,EAAKa,GAC7B4B,EAA
Q6B,aAAa,WAAW0hB,GAAiBhmB,KAAQa,EAC3D,EACA,mBAAAulB,CAAoB3jB,EAASzC,GAC3ByC,EAAQ4B,gBAAgB,WAAW2hB,GAAiBhmB,KACtD,EACA,iBAAAqmB,CAAkB5jB,GAChB,IAAKA,EACH,MAAO,CAAC,EAEV,MAAM0B,EAAa,CAAC,EACdmiB,EAASpmB,OAAO4D,KAAKrB,EAAQ8jB,SAASld,QAAOrJ,GAAOA,EAAI2kB,WAAW,QAAU3kB,EAAI2kB,WAAW,cAClG,IAAK,MAAM3kB,KAAOsmB,EAAQ,CACxB,IAAIE,EAAUxmB,EAAIqO,QAAQ,MAAO,IACjCmY,EAAUA,EAAQC,OAAO,GAAG9jB,cAAgB6jB,EAAQlR,MAAM,EAAGkR,EAAQ5S,QACrEzP,EAAWqiB,GAAWZ,GAAcnjB,EAAQ8jB,QAAQvmB,GACtD,CACA,OAAOmE,CACT,EACAuiB,iBAAgB,CAACjkB,EAASzC,IACjB4lB,GAAcnjB,EAAQic,aAAa,WAAWsH,GAAiBhmB,QAgB1E,MAAM2mB,GAEJ,kBAAWC,GACT,MAAO,CAAC,CACV,CACA,sBAAWC,GACT,MAAO,CAAC,CACV,CACA,eAAWpH,GACT,MAAM,IAAIqH,MAAM,sEAClB,CACA,UAAAC,CAAWC,GAIT,OAHAA,EAAS9D,KAAK+D,gBAAgBD,GAC9BA,EAAS9D,KAAKgE,kBAAkBF,GAChC9D,KAAKiE,iBAAiBH,GACfA,CACT,CACA,iBAAAE,CAAkBF,GAChB,OAAOA,CACT,CACA,eAAAC,CAAgBD,EAAQvkB,GACtB,MAAM2kB,EAAa,GAAU3kB,GAAWyjB,GAAYQ,iBAAiBjkB,EAAS,UAAY,CAAC,EAE3F,MAAO,IACFygB,KAAKmE,YAAYT,WACM,iBAAfQ,EAA0BA,EAAa,CAAC,KAC/C,GAAU3kB,GAAWyjB,GAAYG,kBAAkB5jB,GAAW,CAAC,KAC7C,iBAAXukB,EAAsBA,EAAS,CAAC,EAE/C,CACA,gBAAAG,CAAiBH,EAAQM,EAAcpE,KAAKmE,YAAYR,aACtD,IAAK,MAAO7hB,EAAUuiB,KAAkBrnB,OAAOmkB,QAAQiD,GAAc,CACnE,MAAMzmB,EAAQmmB,EAAOhiB,GACfwiB,EAAY,GAAU3mB,GAAS,UAhiBrC4c,OADSA,EAiiB+C5c,GA/hBnD,GAAG4c,IAELvd,OAAOM,UAAUuC,SAASrC,KAAK+c,GAAQL,MAAM,eAAe,GAAGza,cA8hBlE,IAAK,IAAI8kB,OAAOF,GAAehhB,KAAKihB,GAClC,MAAM,IAAIE,UAAU,GAAGxE,KAAKmE,YAAY5H,KAAKkI,0BAA0B3iB,qBAA4BwiB,yBAAiCD,MAExI,CAriBW9J,KAsiBb,EAqBF,MAAMmK,WAAsBjB,GAC1B,WAAAU,CAAY5kB,EAASukB,GACnBa,SACAplB,EAAUmb,GAAWnb,MAIrBygB,KAAK4E,SAAWrlB,EAChBygB,KAAK6E,QAAU7E,KAAK6D,WAAWC,GAC/BzK,GAAKtH,IAAIiO,KAAK4E,SAAU5E,KAAKmE,YAAYW,SAAU9E,MACrD,CAGA,OAAA+E,GACE1L,GAAKM,OAAOqG,KAAK4E,SAAU5E,KAAKmE,YAAYW,UAC5CvE,GAAaC,IAAIR,KAAK4E,SAAU5E,KAAKmE,YAAYa,WACjD,IAAK,MAAMC,KAAgBjoB,OAAOkoB,oBAAoBlF,MACpDA,KAAKiF,GAAgB,IAEzB,CACA,cAAAE,CAAe9I,EAAU9c,EAAS6lB,GAAa,GAC7CpI,GAAuBX,EAAU9c,EAAS6lB,EAC5C,CACA,UAAAvB,CAAWC,GAIT,OAHAA,EAAS9D,KAAK+D,gBAAgBD,EAAQ9D,KAAK4E,UAC3Cd,EAAS9D,KAAKgE,kBAAkBF,GAChC9D,KAAKiE,iBAAiBH,GACfA,CACT,CAGA,kBAAOuB,CAAY9lB,GACjB,OAAO8Z,GAAKlc,IAAIud,GAAWnb,GAAUygB,KAAK8E,SAC5C,CACA,0BAAOQ,CAAoB/lB,EAASukB,EAAS,CAAC,GAC5C,OAAO9D,KAAKqF,YAAY9lB,IAAY,IAAIygB,KAAKzgB,EAA2B,iBAAXukB,EAAsBA,EAAS,KAC9F,CACA,kBAAWyB,GACT,MA5CY,OA6Cd,CACA,mBAAWT,GACT,MAAO,MAAM9E,KAAKzD,MACpB,CACA,oBAAWyI,GACT,MAAO,IAAIhF,KAAK8E,UAClB,CACA,gBAAOU,CAAUllB,GACf,MAAO,GAAGA,IAAO0f,KAAKgF,WACxB,EAUF,MAAMS,GAAclmB,IAClB,IAAIwa,EAAWxa,EAAQic,aAAa,kBACpC,IAAKzB,GAAyB,MAAbA,EAAkB,CACjC,IAAI2L,EAAgBnmB,EAAQic,aAAa,QAMzC,IAAKkK,IAAkBA,EAActE,SAAS,OAASsE,EAAcjE,WAAW,KAC9E,OAAO,KAILiE,EAActE,SAAS,OAASsE,EAAcjE,WAAW,OAC3DiE,EAAgB,IAAIA,EAAcxjB,MAAM,KAAK,MAE/C6X,EAAW2L,GAAmC,MAAlBA,EAAwBA,EAAcC,OAAS,IAC7E,CACA,OAAO5L,EAAWA,EAAS7X,MAAM,KAAKY,KAAI8iB,GAAO9L,GAAc8L,KAAM1iB,KAAK,KAAO,IAAI,EAEjF2iB,GAAiB,CACrB1T,KAAI,CAAC4H,EAAUxa,EAAU8F,SAASC,kBACzB,GAAG3G,UAAUsB,QAAQ3C,UAAU8iB,iBAAiB5iB,KAAK+B,EAASwa,IAEvE+L,QAAO,CAAC/L,EAAUxa,EAAU8F,SAASC,kBAC5BrF,QAAQ3C,UAAU8K,cAAc5K,KAAK+B,EAASwa,GAEvDgM,SAAQ,CAACxmB,EAASwa,IACT,GAAGpb,UAAUY,EAAQwmB,UAAU5f,QAAOzB,GAASA,EAAMshB,QAAQjM,KAEtE,OAAAkM,CAAQ1mB,EAASwa,GACf,MAAMkM,EAAU,GAChB,IAAIC,EAAW3mB,EAAQwF,WAAWiW,QAAQjB,GAC1C,KAAOmM,GACLD,EAAQrU,KAAKsU,GACbA,EAAWA,EAASnhB,WAAWiW,QAAQjB,GAEzC,OAAOkM,CACT,EACA,IAAAE,CAAK5mB,EAASwa,GACZ,IAAIqM,EAAW7mB,EAAQ8mB,uBACvB,KAAOD,GAAU,CACf,GAAIA,EAASJ,QAAQjM,GACnB,MAAO,CAACqM,GAEVA,EAAWA,EAASC,sBACtB,CACA,MAAO,EACT,EAEA,IAAAxhB,CAAKtF,EAASwa,GACZ,IAAIlV,EAAOtF,EAAQ+mB,mBACnB,KAAOzhB,GAAM,CACX,GAAIA,EAAKmhB,QAAQjM,GACf,MAAO,CAAClV,GAEVA,EAAOA,EAAKyhB,kBACd,CA
CA,MAAO,EACT,EACA,iBAAAC,CAAkBhnB,GAChB,MAAMinB,EAAa,CAAC,IAAK,SAAU,QAAS,WAAY,SAAU,UAAW,aAAc,4BAA4B1jB,KAAIiX,GAAY,GAAGA,2BAAiC7W,KAAK,KAChL,OAAO8c,KAAK7N,KAAKqU,EAAYjnB,GAAS4G,QAAOsgB,IAAOvL,GAAWuL,IAAO9L,GAAU8L,IAClF,EACA,sBAAAC,CAAuBnnB,GACrB,MAAMwa,EAAW0L,GAAYlmB,GAC7B,OAAIwa,GACK8L,GAAeC,QAAQ/L,GAAYA,EAErC,IACT,EACA,sBAAA4M,CAAuBpnB,GACrB,MAAMwa,EAAW0L,GAAYlmB,GAC7B,OAAOwa,EAAW8L,GAAeC,QAAQ/L,GAAY,IACvD,EACA,+BAAA6M,CAAgCrnB,GAC9B,MAAMwa,EAAW0L,GAAYlmB,GAC7B,OAAOwa,EAAW8L,GAAe1T,KAAK4H,GAAY,EACpD,GAUI8M,GAAuB,CAACC,EAAWC,EAAS,UAChD,MAAMC,EAAa,gBAAgBF,EAAU9B,YACvC1kB,EAAOwmB,EAAUvK,KACvBgE,GAAac,GAAGhc,SAAU2hB,EAAY,qBAAqB1mB,OAAU,SAAU8e,GAI7E,GAHI,CAAC,IAAK,QAAQgC,SAASpB,KAAKiH,UAC9B7H,EAAMkD,iBAEJpH,GAAW8E,MACb,OAEF,MAAMzT,EAASsZ,GAAec,uBAAuB3G,OAASA,KAAKhF,QAAQ,IAAI1a,KAC9DwmB,EAAUxB,oBAAoB/Y,GAGtCwa,IACX,GAAE,EAiBEG,GAAc,YACdC,GAAc,QAAQD,KACtBE,GAAe,SAASF,KAQ9B,MAAMG,WAAc3C,GAElB,eAAWnI,GACT,MAfW,OAgBb,CAGA,KAAA+K,GAEE,GADmB/G,GAAaqB,QAAQ5B,KAAK4E,SAAUuC,IACxCnF,iBACb,OAEFhC,KAAK4E,SAASvJ,UAAU1B,OAlBF,QAmBtB,MAAMyL,EAAapF,KAAK4E,SAASvJ,UAAU7W,SApBrB,QAqBtBwb,KAAKmF,gBAAe,IAAMnF,KAAKuH,mBAAmBvH,KAAK4E,SAAUQ,EACnE,CAGA,eAAAmC,GACEvH,KAAK4E,SAASjL,SACd4G,GAAaqB,QAAQ5B,KAAK4E,SAAUwC,IACpCpH,KAAK+E,SACP,CAGA,sBAAOtI,CAAgBqH,GACrB,OAAO9D,KAAKwH,MAAK,WACf,MAAMnd,EAAOgd,GAAM/B,oBAAoBtF,MACvC,GAAsB,iBAAX8D,EAAX,CAGA,QAAqB/K,IAAjB1O,EAAKyZ,IAAyBA,EAAOrC,WAAW,MAAmB,gBAAXqC,EAC1D,MAAM,IAAIU,UAAU,oBAAoBV,MAE1CzZ,EAAKyZ,GAAQ9D,KAJb,CAKF,GACF,EAOF6G,GAAqBQ,GAAO,SAM5BlL,GAAmBkL,IAcnB,MAKMI,GAAyB,4BAO/B,MAAMC,WAAehD,GAEnB,eAAWnI,GACT,MAfW,QAgBb,CAGA,MAAAoL,GAEE3H,KAAK4E,SAASxjB,aAAa,eAAgB4e,KAAK4E,SAASvJ,UAAUsM,OAjB3C,UAkB1B,CAGA,sBAAOlL,CAAgBqH,GACrB,OAAO9D,KAAKwH,MAAK,WACf,MAAMnd,EAAOqd,GAAOpC,oBAAoBtF,MACzB,WAAX8D,GACFzZ,EAAKyZ,IAET,GACF,EAOFvD,GAAac,GAAGhc,SAjCe,2BAiCmBoiB,IAAwBrI,IACxEA,EAAMkD,iBACN,MAAMsF,EAASxI,EAAM7S,OAAOyO,QAAQyM,IACvBC,GAAOpC,oBAAoBsC,GACnCD,QAAQ,IAOfxL,GAAmBuL,IAcnB,MACMG,GAAc,YACdC,GAAmB,aAAaD,KAChCE,GAAkB,YAAYF,KAC9BG,GAAiB,WAAWH,KAC5BI,GAAoB,cAAcJ,KAClCK,GAAkB,YAAYL,KAK9BM,GAAY,CAChBC,YAAa,KACbC,aAAc,KACdC,cAAe,MAEXC,GAAgB,CACpBH,YAAa,kBACbC,aAAc,kBACdC,cAAe,mBAOjB,MAAME,WAAc/E,GAClB,WAAAU,CAAY5kB,EAASukB,GACnBa,QACA3E,KAAK4E,SAAWrlB,EACXA,GAAYipB,GAAMC,gBAGvBzI,KAAK6E,QAAU7E,KAAK6D,WAAWC,GAC/B9D,KAAK0I,QAAU,EACf1I,KAAK2I,sBAAwB7H,QAAQlhB,OAAOgpB,cAC5C5I,KAAK6I,cACP,CAGA,kBAAWnF,GACT,OAAOyE,EACT,CACA,sBAAWxE,GACT,OAAO4E,EACT,CACA,eAAWhM,GACT,MA/CW,OAgDb,CAGA,OAAAwI,GACExE,GAAaC,IAAIR,KAAK4E,SAAUiD,GAClC,CAGA,MAAAiB,CAAO1J,GACAY,KAAK2I,sBAIN3I,KAAK+I,wBAAwB3J,KAC/BY,KAAK0I,QAAUtJ,EAAM4J,SAJrBhJ,KAAK0I,QAAUtJ,EAAM6J,QAAQ,GAAGD,OAMpC,CACA,IAAAE,CAAK9J,GACCY,KAAK+I,wBAAwB3J,KAC/BY,KAAK0I,QAAUtJ,EAAM4J,QAAUhJ,KAAK0I,SAEtC1I,KAAKmJ,eACLtM,GAAQmD,KAAK6E,QAAQuD,YACvB,CACA,KAAAgB,CAAMhK,GACJY,KAAK0I,QAAUtJ,EAAM6J,SAAW7J,EAAM6J,QAAQvY,OAAS,EAAI,EAAI0O,EAAM6J,QAAQ,GAAGD,QAAUhJ,KAAK0I,OACjG,CACA,YAAAS,GACE,MAAME,EAAYlnB,KAAKoC,IAAIyb,KAAK0I,SAChC,GAAIW,GAnEgB,GAoElB,OAEF,MAAM/b,EAAY+b,EAAYrJ,KAAK0I,QACnC1I,KAAK0I,QAAU,EACVpb,GAGLuP,GAAQvP,EAAY,EAAI0S,KAAK6E,QAAQyD,cAAgBtI,KAAK6E,QAAQwD,aACpE,CACA,WAAAQ,GACM7I,KAAK2I,uBACPpI,GAAac,GAAGrB,KAAK4E,SAAUqD,IAAmB7I,GAASY,KAAK8I,OAAO1J,KACvEmB,GAAac,GAAGrB,KAAK4E,SAAUsD,IAAiB9I,GAASY,KAAKkJ,KAAK9J,KACnEY,KAAK4E,SAASvJ,UAAU5E,IAlFG,mBAoF3B8J,GAAac,GAAGrB,KAAK4E,SAAUkD,IAAkB1I,GAASY,KAAK8I,OAAO1J,KACtEmB,GAAac,GAAGrB,KAAK4E,SAAUmD,IAAiB3I,GAASY,KAAKoJ,MAAMhK,KACpEmB,GAAac,GAAGrB,KAAK4E,SAAUoD,IAAgB5I,GAASY,KAAKkJ,KAAK9J,KAEtE,CACA,uBAAA2J,CAAwB3J,GACtB,OAAOY,KAAK2I,wBA3FS,QA2FiBvJ,EAAMkK,aA5FrB,UA4FyDlK,EAAMkK,YACxF,CAGA,kBAAOb,GACL,MAAO,iBA
AkBpjB,SAASC,iBAAmB7C,UAAU8mB,eAAiB,CAClF,EAeF,MAEMC,GAAc,eACdC,GAAiB,YACjBC,GAAmB,YACnBC,GAAoB,aAGpBC,GAAa,OACbC,GAAa,OACbC,GAAiB,OACjBC,GAAkB,QAClBC,GAAc,QAAQR,KACtBS,GAAa,OAAOT,KACpBU,GAAkB,UAAUV,KAC5BW,GAAqB,aAAaX,KAClCY,GAAqB,aAAaZ,KAClCa,GAAmB,YAAYb,KAC/Bc,GAAwB,OAAOd,KAAcC,KAC7Cc,GAAyB,QAAQf,KAAcC,KAC/Ce,GAAsB,WACtBC,GAAsB,SAMtBC,GAAkB,UAClBC,GAAgB,iBAChBC,GAAuBF,GAAkBC,GAKzCE,GAAmB,CACvB,CAACnB,IAAmBK,GACpB,CAACJ,IAAoBG,IAEjBgB,GAAY,CAChBC,SAAU,IACVC,UAAU,EACVC,MAAO,QACPC,MAAM,EACNC,OAAO,EACPC,MAAM,GAEFC,GAAgB,CACpBN,SAAU,mBAEVC,SAAU,UACVC,MAAO,mBACPC,KAAM,mBACNC,MAAO,UACPC,KAAM,WAOR,MAAME,WAAiB5G,GACrB,WAAAP,CAAY5kB,EAASukB,GACnBa,MAAMplB,EAASukB,GACf9D,KAAKuL,UAAY,KACjBvL,KAAKwL,eAAiB,KACtBxL,KAAKyL,YAAa,EAClBzL,KAAK0L,aAAe,KACpB1L,KAAK2L,aAAe,KACpB3L,KAAK4L,mBAAqB/F,GAAeC,QArCjB,uBAqC8C9F,KAAK4E,UAC3E5E,KAAK6L,qBACD7L,KAAK6E,QAAQqG,OAASV,IACxBxK,KAAK8L,OAET,CAGA,kBAAWpI,GACT,OAAOoH,EACT,CACA,sBAAWnH,GACT,OAAO0H,EACT,CACA,eAAW9O,GACT,MAnFW,UAoFb,CAGA,IAAA1X,GACEmb,KAAK+L,OAAOnC,GACd,CACA,eAAAoC,IAIO3mB,SAAS4mB,QAAUtR,GAAUqF,KAAK4E,WACrC5E,KAAKnb,MAET,CACA,IAAAshB,GACEnG,KAAK+L,OAAOlC,GACd,CACA,KAAAoB,GACMjL,KAAKyL,YACPrR,GAAqB4F,KAAK4E,UAE5B5E,KAAKkM,gBACP,CACA,KAAAJ,GACE9L,KAAKkM,iBACLlM,KAAKmM,kBACLnM,KAAKuL,UAAYa,aAAY,IAAMpM,KAAKgM,mBAAmBhM,KAAK6E,QAAQkG,SAC1E,CACA,iBAAAsB,GACOrM,KAAK6E,QAAQqG,OAGdlL,KAAKyL,WACPlL,GAAae,IAAItB,KAAK4E,SAAUqF,IAAY,IAAMjK,KAAK8L,UAGzD9L,KAAK8L,QACP,CACA,EAAAQ,CAAG7T,GACD,MAAM8T,EAAQvM,KAAKwM,YACnB,GAAI/T,EAAQ8T,EAAM7b,OAAS,GAAK+H,EAAQ,EACtC,OAEF,GAAIuH,KAAKyL,WAEP,YADAlL,GAAae,IAAItB,KAAK4E,SAAUqF,IAAY,IAAMjK,KAAKsM,GAAG7T,KAG5D,MAAMgU,EAAczM,KAAK0M,cAAc1M,KAAK2M,cAC5C,GAAIF,IAAgBhU,EAClB,OAEF,MAAMtC,EAAQsC,EAAQgU,EAAc7C,GAAaC,GACjD7J,KAAK+L,OAAO5V,EAAOoW,EAAM9T,GAC3B,CACA,OAAAsM,GACM/E,KAAK2L,cACP3L,KAAK2L,aAAa5G,UAEpBJ,MAAMI,SACR,CAGA,iBAAAf,CAAkBF,GAEhB,OADAA,EAAO8I,gBAAkB9I,EAAOiH,SACzBjH,CACT,CACA,kBAAA+H,GACM7L,KAAK6E,QAAQmG,UACfzK,GAAac,GAAGrB,KAAK4E,SAAUsF,IAAiB9K,GAASY,KAAK6M,SAASzN,KAE9C,UAAvBY,KAAK6E,QAAQoG,QACf1K,GAAac,GAAGrB,KAAK4E,SAAUuF,IAAoB,IAAMnK,KAAKiL,UAC9D1K,GAAac,GAAGrB,KAAK4E,SAAUwF,IAAoB,IAAMpK,KAAKqM,uBAE5DrM,KAAK6E,QAAQsG,OAAS3C,GAAMC,eAC9BzI,KAAK8M,yBAET,CACA,uBAAAA,GACE,IAAK,MAAMC,KAAOlH,GAAe1T,KArIX,qBAqImC6N,KAAK4E,UAC5DrE,GAAac,GAAG0L,EAAK1C,IAAkBjL,GAASA,EAAMkD,mBAExD,MAmBM0K,EAAc,CAClB3E,aAAc,IAAMrI,KAAK+L,OAAO/L,KAAKiN,kBAAkBnD,KACvDxB,cAAe,IAAMtI,KAAK+L,OAAO/L,KAAKiN,kBAAkBlD,KACxD3B,YAtBkB,KACS,UAAvBpI,KAAK6E,QAAQoG,QAYjBjL,KAAKiL,QACDjL,KAAK0L,cACPwB,aAAalN,KAAK0L,cAEpB1L,KAAK0L,aAAe7N,YAAW,IAAMmC,KAAKqM,qBAjLjB,IAiL+DrM,KAAK6E,QAAQkG,UAAS,GAOhH/K,KAAK2L,aAAe,IAAInD,GAAMxI,KAAK4E,SAAUoI,EAC/C,CACA,QAAAH,CAASzN,GACP,GAAI,kBAAkB/b,KAAK+b,EAAM7S,OAAO0a,SACtC,OAEF,MAAM3Z,EAAYud,GAAiBzL,EAAMtiB,KACrCwQ,IACF8R,EAAMkD,iBACNtC,KAAK+L,OAAO/L,KAAKiN,kBAAkB3f,IAEvC,CACA,aAAAof,CAAcntB,GACZ,OAAOygB,KAAKwM,YAAYrnB,QAAQ5F,EAClC,CACA,0BAAA4tB,CAA2B1U,GACzB,IAAKuH,KAAK4L,mBACR,OAEF,MAAMwB,EAAkBvH,GAAeC,QAAQ4E,GAAiB1K,KAAK4L,oBACrEwB,EAAgB/R,UAAU1B,OAAO8Q,IACjC2C,EAAgBjsB,gBAAgB,gBAChC,MAAMksB,EAAqBxH,GAAeC,QAAQ,sBAAsBrN,MAAWuH,KAAK4L,oBACpFyB,IACFA,EAAmBhS,UAAU5E,IAAIgU,IACjC4C,EAAmBjsB,aAAa,eAAgB,QAEpD,CACA,eAAA+qB,GACE,MAAM5sB,EAAUygB,KAAKwL,gBAAkBxL,KAAK2M,aAC5C,IAAKptB,EACH,OAEF,MAAM+tB,EAAkB/P,OAAOgQ,SAAShuB,EAAQic,aAAa,oBAAqB,IAClFwE,KAAK6E,QAAQkG,SAAWuC,GAAmBtN,KAAK6E,QAAQ+H,eAC1D,CACA,MAAAb,CAAO5V,EAAO5W,EAAU,MACtB,GAAIygB,KAAKyL,WACP,OAEF,MAAM1N,EAAgBiC,KAAK2M,aACrBa,EAASrX,IAAUyT,GACnB6D,EAAcluB,GAAWue,GAAqBkC,KAAKwM,YAAazO,EAAeyP,EAAQxN,KAAK6E,QAAQuG,MAC1G,GAAIqC,IAAgB1P,EAClB,OAEF,MAAM2P,EAAmB1N,KAAK0M,cAAce,GACtCE
,EAAenI,GACZjF,GAAaqB,QAAQ5B,KAAK4E,SAAUY,EAAW,CACpD1F,cAAe2N,EACfngB,UAAW0S,KAAK4N,kBAAkBzX,GAClCuD,KAAMsG,KAAK0M,cAAc3O,GACzBuO,GAAIoB,IAIR,GADmBC,EAAa3D,IACjBhI,iBACb,OAEF,IAAKjE,IAAkB0P,EAGrB,OAEF,MAAMI,EAAY/M,QAAQd,KAAKuL,WAC/BvL,KAAKiL,QACLjL,KAAKyL,YAAa,EAClBzL,KAAKmN,2BAA2BO,GAChC1N,KAAKwL,eAAiBiC,EACtB,MAAMK,EAAuBN,EA3OR,sBADF,oBA6ObO,EAAiBP,EA3OH,qBACA,qBA2OpBC,EAAYpS,UAAU5E,IAAIsX,GAC1BlS,GAAO4R,GACP1P,EAAc1C,UAAU5E,IAAIqX,GAC5BL,EAAYpS,UAAU5E,IAAIqX,GAQ1B9N,KAAKmF,gBAPoB,KACvBsI,EAAYpS,UAAU1B,OAAOmU,EAAsBC,GACnDN,EAAYpS,UAAU5E,IAAIgU,IAC1B1M,EAAc1C,UAAU1B,OAAO8Q,GAAqBsD,EAAgBD,GACpE9N,KAAKyL,YAAa,EAClBkC,EAAa1D,GAAW,GAEYlM,EAAeiC,KAAKgO,eACtDH,GACF7N,KAAK8L,OAET,CACA,WAAAkC,GACE,OAAOhO,KAAK4E,SAASvJ,UAAU7W,SAhQV,QAiQvB,CACA,UAAAmoB,GACE,OAAO9G,GAAeC,QAAQ8E,GAAsB5K,KAAK4E,SAC3D,CACA,SAAA4H,GACE,OAAO3G,GAAe1T,KAAKwY,GAAe3K,KAAK4E,SACjD,CACA,cAAAsH,GACMlM,KAAKuL,YACP0C,cAAcjO,KAAKuL,WACnBvL,KAAKuL,UAAY,KAErB,CACA,iBAAA0B,CAAkB3f,GAChB,OAAI2O,KACK3O,IAAcwc,GAAiBD,GAAaD,GAE9Ctc,IAAcwc,GAAiBF,GAAaC,EACrD,CACA,iBAAA+D,CAAkBzX,GAChB,OAAI8F,KACK9F,IAAU0T,GAAaC,GAAiBC,GAE1C5T,IAAU0T,GAAaE,GAAkBD,EAClD,CAGA,sBAAOrN,CAAgBqH,GACrB,OAAO9D,KAAKwH,MAAK,WACf,MAAMnd,EAAOihB,GAAShG,oBAAoBtF,KAAM8D,GAChD,GAAsB,iBAAXA,GAIX,GAAsB,iBAAXA,EAAqB,CAC9B,QAAqB/K,IAAjB1O,EAAKyZ,IAAyBA,EAAOrC,WAAW,MAAmB,gBAAXqC,EAC1D,MAAM,IAAIU,UAAU,oBAAoBV,MAE1CzZ,EAAKyZ,IACP,OAREzZ,EAAKiiB,GAAGxI,EASZ,GACF,EAOFvD,GAAac,GAAGhc,SAAUklB,GAvSE,uCAuS2C,SAAUnL,GAC/E,MAAM7S,EAASsZ,GAAec,uBAAuB3G,MACrD,IAAKzT,IAAWA,EAAO8O,UAAU7W,SAASgmB,IACxC,OAEFpL,EAAMkD,iBACN,MAAM4L,EAAW5C,GAAShG,oBAAoB/Y,GACxC4hB,EAAanO,KAAKxE,aAAa,oBACrC,OAAI2S,GACFD,EAAS5B,GAAG6B,QACZD,EAAS7B,qBAGyC,SAAhDrJ,GAAYQ,iBAAiBxD,KAAM,UACrCkO,EAASrpB,YACTqpB,EAAS7B,sBAGX6B,EAAS/H,YACT+H,EAAS7B,oBACX,IACA9L,GAAac,GAAGzhB,OAAQ0qB,IAAuB,KAC7C,MAAM8D,EAAYvI,GAAe1T,KA5TR,6BA6TzB,IAAK,MAAM+b,KAAYE,EACrB9C,GAAShG,oBAAoB4I,EAC/B,IAOF/R,GAAmBmP,IAcnB,MAEM+C,GAAc,eAEdC,GAAe,OAAOD,KACtBE,GAAgB,QAAQF,KACxBG,GAAe,OAAOH,KACtBI,GAAiB,SAASJ,KAC1BK,GAAyB,QAAQL,cACjCM,GAAoB,OACpBC,GAAsB,WACtBC,GAAwB,aAExBC,GAA6B,WAAWF,OAAwBA,KAKhEG,GAAyB,8BACzBC,GAAY,CAChBvqB,OAAQ,KACRkjB,QAAQ,GAEJsH,GAAgB,CACpBxqB,OAAQ,iBACRkjB,OAAQ,WAOV,MAAMuH,WAAiBxK,GACrB,WAAAP,CAAY5kB,EAASukB,GACnBa,MAAMplB,EAASukB,GACf9D,KAAKmP,kBAAmB,EACxBnP,KAAKoP,cAAgB,GACrB,MAAMC,EAAaxJ,GAAe1T,KAAK4c,IACvC,IAAK,MAAMO,KAAQD,EAAY,CAC7B,MAAMtV,EAAW8L,GAAea,uBAAuB4I,GACjDC,EAAgB1J,GAAe1T,KAAK4H,GAAU5T,QAAOqpB,GAAgBA,IAAiBxP,KAAK4E,WAChF,OAAb7K,GAAqBwV,EAAc7e,QACrCsP,KAAKoP,cAAcxd,KAAK0d,EAE5B,CACAtP,KAAKyP,sBACAzP,KAAK6E,QAAQpgB,QAChBub,KAAK0P,0BAA0B1P,KAAKoP,cAAepP,KAAK2P,YAEtD3P,KAAK6E,QAAQ8C,QACf3H,KAAK2H,QAET,CAGA,kBAAWjE,GACT,OAAOsL,EACT,CACA,sBAAWrL,GACT,OAAOsL,EACT,CACA,eAAW1S,GACT,MA9DW,UA+Db,CAGA,MAAAoL,GACM3H,KAAK2P,WACP3P,KAAK4P,OAEL5P,KAAK6P,MAET,CACA,IAAAA,GACE,GAAI7P,KAAKmP,kBAAoBnP,KAAK2P,WAChC,OAEF,IAAIG,EAAiB,GAQrB,GALI9P,KAAK6E,QAAQpgB,SACfqrB,EAAiB9P,KAAK+P,uBAhEH,wCAgE4C5pB,QAAO5G,GAAWA,IAAYygB,KAAK4E,WAAU9hB,KAAIvD,GAAW2vB,GAAS5J,oBAAoB/lB,EAAS,CAC/JooB,QAAQ,OAGRmI,EAAepf,QAAUof,EAAe,GAAGX,iBAC7C,OAGF,GADmB5O,GAAaqB,QAAQ5B,KAAK4E,SAAU0J,IACxCtM,iBACb,OAEF,IAAK,MAAMgO,KAAkBF,EAC3BE,EAAeJ,OAEjB,MAAMK,EAAYjQ,KAAKkQ,gBACvBlQ,KAAK4E,SAASvJ,UAAU1B,OAAOiV,IAC/B5O,KAAK4E,SAASvJ,UAAU5E,IAAIoY,IAC5B7O,KAAK4E,SAAS7jB,MAAMkvB,GAAa,EACjCjQ,KAAK0P,0BAA0B1P,KAAKoP,eAAe,GACnDpP,KAAKmP,kBAAmB,EACxB,MAQMgB,EAAa,SADUF,EAAU,GAAGxL,cAAgBwL,EAAU7d,MAAM,KAE1E4N,KAAKmF,gBATY,KACfnF,KAAKmP,kBAAmB,EACxBnP,KAAK4E,SAASvJ,UAAU1B,OAAOkV,IAC/B7O,KAAK4E,SAASvJ,UAAU5E,IAAImY,GAAqBD,IACjD3O,KAAK4E,SAAS7jB,MAAMkvB,GAAa,GACjC1P,GAAaqB,QAAQ5B,
KAAK4E,SAAU2J,GAAc,GAItBvO,KAAK4E,UAAU,GAC7C5E,KAAK4E,SAAS7jB,MAAMkvB,GAAa,GAAGjQ,KAAK4E,SAASuL,MACpD,CACA,IAAAP,GACE,GAAI5P,KAAKmP,mBAAqBnP,KAAK2P,WACjC,OAGF,GADmBpP,GAAaqB,QAAQ5B,KAAK4E,SAAU4J,IACxCxM,iBACb,OAEF,MAAMiO,EAAYjQ,KAAKkQ,gBACvBlQ,KAAK4E,SAAS7jB,MAAMkvB,GAAa,GAAGjQ,KAAK4E,SAASthB,wBAAwB2sB,OAC1EpU,GAAOmE,KAAK4E,UACZ5E,KAAK4E,SAASvJ,UAAU5E,IAAIoY,IAC5B7O,KAAK4E,SAASvJ,UAAU1B,OAAOiV,GAAqBD,IACpD,IAAK,MAAM/M,KAAW5B,KAAKoP,cAAe,CACxC,MAAM7vB,EAAUsmB,GAAec,uBAAuB/E,GAClDriB,IAAYygB,KAAK2P,SAASpwB,IAC5BygB,KAAK0P,0BAA0B,CAAC9N,IAAU,EAE9C,CACA5B,KAAKmP,kBAAmB,EAOxBnP,KAAK4E,SAAS7jB,MAAMkvB,GAAa,GACjCjQ,KAAKmF,gBAPY,KACfnF,KAAKmP,kBAAmB,EACxBnP,KAAK4E,SAASvJ,UAAU1B,OAAOkV,IAC/B7O,KAAK4E,SAASvJ,UAAU5E,IAAImY,IAC5BrO,GAAaqB,QAAQ5B,KAAK4E,SAAU6J,GAAe,GAGvBzO,KAAK4E,UAAU,EAC/C,CACA,QAAA+K,CAASpwB,EAAUygB,KAAK4E,UACtB,OAAOrlB,EAAQ8b,UAAU7W,SAASmqB,GACpC,CAGA,iBAAA3K,CAAkBF,GAGhB,OAFAA,EAAO6D,OAAS7G,QAAQgD,EAAO6D,QAC/B7D,EAAOrf,OAASiW,GAAWoJ,EAAOrf,QAC3Bqf,CACT,CACA,aAAAoM,GACE,OAAOlQ,KAAK4E,SAASvJ,UAAU7W,SA3IL,uBAChB,QACC,QA0Ib,CACA,mBAAAirB,GACE,IAAKzP,KAAK6E,QAAQpgB,OAChB,OAEF,MAAMshB,EAAW/F,KAAK+P,uBAAuBhB,IAC7C,IAAK,MAAMxvB,KAAWwmB,EAAU,CAC9B,MAAMqK,EAAWvK,GAAec,uBAAuBpnB,GACnD6wB,GACFpQ,KAAK0P,0BAA0B,CAACnwB,GAAUygB,KAAK2P,SAASS,GAE5D,CACF,CACA,sBAAAL,CAAuBhW,GACrB,MAAMgM,EAAWF,GAAe1T,KAAK2c,GAA4B9O,KAAK6E,QAAQpgB,QAE9E,OAAOohB,GAAe1T,KAAK4H,EAAUiG,KAAK6E,QAAQpgB,QAAQ0B,QAAO5G,IAAYwmB,EAAS3E,SAAS7hB,IACjG,CACA,yBAAAmwB,CAA0BW,EAAcC,GACtC,GAAKD,EAAa3f,OAGlB,IAAK,MAAMnR,KAAW8wB,EACpB9wB,EAAQ8b,UAAUsM,OArKK,aAqKyB2I,GAChD/wB,EAAQ6B,aAAa,gBAAiBkvB,EAE1C,CAGA,sBAAO7T,CAAgBqH,GACrB,MAAMe,EAAU,CAAC,EAIjB,MAHsB,iBAAXf,GAAuB,YAAYzgB,KAAKygB,KACjDe,EAAQ8C,QAAS,GAEZ3H,KAAKwH,MAAK,WACf,MAAMnd,EAAO6kB,GAAS5J,oBAAoBtF,KAAM6E,GAChD,GAAsB,iBAAXf,EAAqB,CAC9B,QAA4B,IAAjBzZ,EAAKyZ,GACd,MAAM,IAAIU,UAAU,oBAAoBV,MAE1CzZ,EAAKyZ,IACP,CACF,GACF,EAOFvD,GAAac,GAAGhc,SAAUqpB,GAAwBK,IAAwB,SAAU3P,IAErD,MAAzBA,EAAM7S,OAAO0a,SAAmB7H,EAAMW,gBAAmD,MAAjCX,EAAMW,eAAekH,UAC/E7H,EAAMkD,iBAER,IAAK,MAAM/iB,KAAWsmB,GAAee,gCAAgC5G,MACnEkP,GAAS5J,oBAAoB/lB,EAAS,CACpCooB,QAAQ,IACPA,QAEP,IAMAxL,GAAmB+S,IAcnB,MAAMqB,GAAS,WAETC,GAAc,eACdC,GAAiB,YAGjBC,GAAiB,UACjBC,GAAmB,YAGnBC,GAAe,OAAOJ,KACtBK,GAAiB,SAASL,KAC1BM,GAAe,OAAON,KACtBO,GAAgB,QAAQP,KACxBQ,GAAyB,QAAQR,KAAcC,KAC/CQ,GAAyB,UAAUT,KAAcC,KACjDS,GAAuB,QAAQV,KAAcC,KAC7CU,GAAoB,OAMpBC,GAAyB,4DACzBC,GAA6B,GAAGD,MAA0BD,KAC1DG,GAAgB,iBAIhBC,GAAgBtV,KAAU,UAAY,YACtCuV,GAAmBvV,KAAU,YAAc,UAC3CwV,GAAmBxV,KAAU,aAAe,eAC5CyV,GAAsBzV,KAAU,eAAiB,aACjD0V,GAAkB1V,KAAU,aAAe,cAC3C2V,GAAiB3V,KAAU,cAAgB,aAG3C4V,GAAY,CAChBC,WAAW,EACX7jB,SAAU,kBACV8jB,QAAS,UACT/pB,OAAQ,CAAC,EAAG,GACZgqB,aAAc,KACd1zB,UAAW,UAEP2zB,GAAgB,CACpBH,UAAW,mBACX7jB,SAAU,mBACV8jB,QAAS,SACT/pB,OAAQ,0BACRgqB,aAAc,yBACd1zB,UAAW,2BAOb,MAAM4zB,WAAiBxN,GACrB,WAAAP,CAAY5kB,EAASukB,GACnBa,MAAMplB,EAASukB,GACf9D,KAAKmS,QAAU,KACfnS,KAAKoS,QAAUpS,KAAK4E,SAAS7f,WAE7Bib,KAAKqS,MAAQxM,GAAehhB,KAAKmb,KAAK4E,SAAU0M,IAAe,IAAMzL,GAAeM,KAAKnG,KAAK4E,SAAU0M,IAAe,IAAMzL,GAAeC,QAAQwL,GAAetR,KAAKoS,SACxKpS,KAAKsS,UAAYtS,KAAKuS,eACxB,CAGA,kBAAW7O,GACT,OAAOmO,EACT,CACA,sBAAWlO,GACT,OAAOsO,EACT,CACA,eAAW1V,GACT,OAAOgU,EACT,CAGA,MAAA5I,GACE,OAAO3H,KAAK2P,WAAa3P,KAAK4P,OAAS5P,KAAK6P,MAC9C,CACA,IAAAA,GACE,GAAI3U,GAAW8E,KAAK4E,WAAa5E,KAAK2P,WACpC,OAEF,MAAM7P,EAAgB,CACpBA,cAAeE,KAAK4E,UAGtB,IADkBrE,GAAaqB,QAAQ5B,KAAK4E,SAAUkM,GAAchR,GACtDkC,iBAAd,CASA,GANAhC,KAAKwS,gBAMD,iBAAkBntB,SAASC,kBAAoB0a,KAAKoS,QAAQpX,QAzExC,eA0EtB,IAAK,MAAMzb,IAAW,GAAGZ,UAAU0G,SAAS6G,KAAK6Z,UAC/CxF,GAAac,GAAG9hB,EAAS,YAAaqc,IAG1CoE,KAAK4E,SAAS6N,QACdzS,KAAK4E,SAASxjB,aAAa,iBAAiB,GAC5
C4e,KAAKqS,MAAMhX,UAAU5E,IAAI0a,IACzBnR,KAAK4E,SAASvJ,UAAU5E,IAAI0a,IAC5B5Q,GAAaqB,QAAQ5B,KAAK4E,SAAUmM,GAAejR,EAhBnD,CAiBF,CACA,IAAA8P,GACE,GAAI1U,GAAW8E,KAAK4E,YAAc5E,KAAK2P,WACrC,OAEF,MAAM7P,EAAgB,CACpBA,cAAeE,KAAK4E,UAEtB5E,KAAK0S,cAAc5S,EACrB,CACA,OAAAiF,GACM/E,KAAKmS,SACPnS,KAAKmS,QAAQnZ,UAEf2L,MAAMI,SACR,CACA,MAAAha,GACEiV,KAAKsS,UAAYtS,KAAKuS,gBAClBvS,KAAKmS,SACPnS,KAAKmS,QAAQpnB,QAEjB,CAGA,aAAA2nB,CAAc5S,GAEZ,IADkBS,GAAaqB,QAAQ5B,KAAK4E,SAAUgM,GAAc9Q,GACtDkC,iBAAd,CAMA,GAAI,iBAAkB3c,SAASC,gBAC7B,IAAK,MAAM/F,IAAW,GAAGZ,UAAU0G,SAAS6G,KAAK6Z,UAC/CxF,GAAaC,IAAIjhB,EAAS,YAAaqc,IAGvCoE,KAAKmS,SACPnS,KAAKmS,QAAQnZ,UAEfgH,KAAKqS,MAAMhX,UAAU1B,OAAOwX,IAC5BnR,KAAK4E,SAASvJ,UAAU1B,OAAOwX,IAC/BnR,KAAK4E,SAASxjB,aAAa,gBAAiB,SAC5C4hB,GAAYE,oBAAoBlD,KAAKqS,MAAO,UAC5C9R,GAAaqB,QAAQ5B,KAAK4E,SAAUiM,GAAgB/Q,EAhBpD,CAiBF,CACA,UAAA+D,CAAWC,GAET,GAAgC,iBADhCA,EAASa,MAAMd,WAAWC,IACRxlB,YAA2B,GAAUwlB,EAAOxlB,YAAgE,mBAA3CwlB,EAAOxlB,UAAUgF,sBAElG,MAAM,IAAIkhB,UAAU,GAAG+L,GAAO9L,+GAEhC,OAAOX,CACT,CACA,aAAA0O,GACE,QAAsB,IAAX,EACT,MAAM,IAAIhO,UAAU,gEAEtB,IAAImO,EAAmB3S,KAAK4E,SACG,WAA3B5E,KAAK6E,QAAQvmB,UACfq0B,EAAmB3S,KAAKoS,QACf,GAAUpS,KAAK6E,QAAQvmB,WAChCq0B,EAAmBjY,GAAWsF,KAAK6E,QAAQvmB,WACA,iBAA3B0hB,KAAK6E,QAAQvmB,YAC7Bq0B,EAAmB3S,KAAK6E,QAAQvmB,WAElC,MAAM0zB,EAAehS,KAAK4S,mBAC1B5S,KAAKmS,QAAU,GAAoBQ,EAAkB3S,KAAKqS,MAAOL,EACnE,CACA,QAAArC,GACE,OAAO3P,KAAKqS,MAAMhX,UAAU7W,SAAS2sB,GACvC,CACA,aAAA0B,GACE,MAAMC,EAAiB9S,KAAKoS,QAC5B,GAAIU,EAAezX,UAAU7W,SArKN,WAsKrB,OAAOmtB,GAET,GAAImB,EAAezX,UAAU7W,SAvKJ,aAwKvB,OAAOotB,GAET,GAAIkB,EAAezX,UAAU7W,SAzKA,iBA0K3B,MA5JsB,MA8JxB,GAAIsuB,EAAezX,UAAU7W,SA3KE,mBA4K7B,MA9JyB,SAkK3B,MAAMuuB,EAAkF,QAA1E9tB,iBAAiB+a,KAAKqS,OAAOvX,iBAAiB,iBAAiB6K,OAC7E,OAAImN,EAAezX,UAAU7W,SArLP,UAsLbuuB,EAAQvB,GAAmBD,GAE7BwB,EAAQrB,GAAsBD,EACvC,CACA,aAAAc,GACE,OAAkD,OAA3CvS,KAAK4E,SAAS5J,QAnLD,UAoLtB,CACA,UAAAgY,GACE,MAAM,OACJhrB,GACEgY,KAAK6E,QACT,MAAsB,iBAAX7c,EACFA,EAAO9F,MAAM,KAAKY,KAAInF,GAAS4f,OAAOgQ,SAAS5vB,EAAO,MAEzC,mBAAXqK,EACFirB,GAAcjrB,EAAOirB,EAAYjT,KAAK4E,UAExC5c,CACT,CACA,gBAAA4qB,GACE,MAAMM,EAAwB,CAC5Bx0B,UAAWshB,KAAK6S,gBAChBzc,UAAW,CAAC,CACV9V,KAAM,kBACNmB,QAAS,CACPwM,SAAU+R,KAAK6E,QAAQ5W,WAExB,CACD3N,KAAM,SACNmB,QAAS,CACPuG,OAAQgY,KAAKgT,iBAanB,OAPIhT,KAAKsS,WAAsC,WAAzBtS,KAAK6E,QAAQkN,WACjC/O,GAAYC,iBAAiBjD,KAAKqS,MAAO,SAAU,UACnDa,EAAsB9c,UAAY,CAAC,CACjC9V,KAAM,cACNC,SAAS,KAGN,IACF2yB,KACArW,GAAQmD,KAAK6E,QAAQmN,aAAc,CAACkB,IAE3C,CACA,eAAAC,EAAgB,IACdr2B,EAAG,OACHyP,IAEA,MAAMggB,EAAQ1G,GAAe1T,KAhOF,8DAgO+B6N,KAAKqS,OAAOlsB,QAAO5G,GAAWob,GAAUpb,KAC7FgtB,EAAM7b,QAMXoN,GAAqByO,EAAOhgB,EAAQzP,IAAQ6zB,IAAmBpE,EAAMnL,SAAS7U,IAASkmB,OACzF,CAGA,sBAAOhW,CAAgBqH,GACrB,OAAO9D,KAAKwH,MAAK,WACf,MAAMnd,EAAO6nB,GAAS5M,oBAAoBtF,KAAM8D,GAChD,GAAsB,iBAAXA,EAAX,CAGA,QAA4B,IAAjBzZ,EAAKyZ,GACd,MAAM,IAAIU,UAAU,oBAAoBV,MAE1CzZ,EAAKyZ,IAJL,CAKF,GACF,CACA,iBAAOsP,CAAWhU,GAChB,GA5QuB,IA4QnBA,EAAMwI,QAAgD,UAAfxI,EAAMqB,MA/QnC,QA+QuDrB,EAAMtiB,IACzE,OAEF,MAAMu2B,EAAcxN,GAAe1T,KAAKkf,IACxC,IAAK,MAAM1J,KAAU0L,EAAa,CAChC,MAAMC,EAAUpB,GAAS7M,YAAYsC,GACrC,IAAK2L,IAAyC,IAA9BA,EAAQzO,QAAQiN,UAC9B,SAEF,MAAMyB,EAAenU,EAAMmU,eACrBC,EAAeD,EAAanS,SAASkS,EAAQjB,OACnD,GAAIkB,EAAanS,SAASkS,EAAQ1O,WAA2C,WAA9B0O,EAAQzO,QAAQiN,YAA2B0B,GAA8C,YAA9BF,EAAQzO,QAAQiN,WAA2B0B,EACnJ,SAIF,GAAIF,EAAQjB,MAAM7tB,SAAS4a,EAAM7S,UAA2B,UAAf6S,EAAMqB,MA/RvC,QA+R2DrB,EAAMtiB,KAAqB,qCAAqCuG,KAAK+b,EAAM7S,OAAO0a,UACvJ,SAEF,MAAMnH,EAAgB,CACpBA,cAAewT,EAAQ1O,UAEN,UAAfxF,EAAMqB,OACRX,EAAckH,WAAa5H,GAE7BkU,EAAQZ,cAAc5S,EACxB,CACF,CACA,4BAAO2T,CAAsBrU,GAI3B,MAAMsU,EAAU,kBAAkBrwB,KAAK+b,EAAM7S,OAAO0a,SAC9C0M,EAjTW,WAiTKvU,EAAMtiB,IACtB82
B,EAAkB,CAAClD,GAAgBC,IAAkBvP,SAAShC,EAAMtiB,KAC1E,IAAK82B,IAAoBD,EACvB,OAEF,GAAID,IAAYC,EACd,OAEFvU,EAAMkD,iBAGN,MAAMuR,EAAkB7T,KAAKgG,QAAQoL,IAA0BpR,KAAO6F,GAAeM,KAAKnG,KAAMoR,IAAwB,IAAMvL,GAAehhB,KAAKmb,KAAMoR,IAAwB,IAAMvL,GAAeC,QAAQsL,GAAwBhS,EAAMW,eAAehb,YACpPwF,EAAW2nB,GAAS5M,oBAAoBuO,GAC9C,GAAID,EAIF,OAHAxU,EAAM0U,kBACNvpB,EAASslB,YACTtlB,EAAS4oB,gBAAgB/T,GAGvB7U,EAASolB,aAEXvQ,EAAM0U,kBACNvpB,EAASqlB,OACTiE,EAAgBpB,QAEpB,EAOFlS,GAAac,GAAGhc,SAAU4rB,GAAwBG,GAAwBc,GAASuB,uBACnFlT,GAAac,GAAGhc,SAAU4rB,GAAwBK,GAAeY,GAASuB,uBAC1ElT,GAAac,GAAGhc,SAAU2rB,GAAwBkB,GAASkB,YAC3D7S,GAAac,GAAGhc,SAAU6rB,GAAsBgB,GAASkB,YACzD7S,GAAac,GAAGhc,SAAU2rB,GAAwBI,IAAwB,SAAUhS,GAClFA,EAAMkD,iBACN4P,GAAS5M,oBAAoBtF,MAAM2H,QACrC,IAMAxL,GAAmB+V,IAcnB,MAAM6B,GAAS,WAETC,GAAoB,OACpBC,GAAkB,gBAAgBF,KAClCG,GAAY,CAChBC,UAAW,iBACXC,cAAe,KACfhP,YAAY,EACZzK,WAAW,EAEX0Z,YAAa,QAETC,GAAgB,CACpBH,UAAW,SACXC,cAAe,kBACfhP,WAAY,UACZzK,UAAW,UACX0Z,YAAa,oBAOf,MAAME,WAAiB9Q,GACrB,WAAAU,CAAYL,GACVa,QACA3E,KAAK6E,QAAU7E,KAAK6D,WAAWC,GAC/B9D,KAAKwU,aAAc,EACnBxU,KAAK4E,SAAW,IAClB,CAGA,kBAAWlB,GACT,OAAOwQ,EACT,CACA,sBAAWvQ,GACT,OAAO2Q,EACT,CACA,eAAW/X,GACT,OAAOwX,EACT,CAGA,IAAAlE,CAAKxT,GACH,IAAK2D,KAAK6E,QAAQlK,UAEhB,YADAkC,GAAQR,GAGV2D,KAAKyU,UACL,MAAMl1B,EAAUygB,KAAK0U,cACjB1U,KAAK6E,QAAQO,YACfvJ,GAAOtc,GAETA,EAAQ8b,UAAU5E,IAAIud,IACtBhU,KAAK2U,mBAAkB,KACrB9X,GAAQR,EAAS,GAErB,CACA,IAAAuT,CAAKvT,GACE2D,KAAK6E,QAAQlK,WAIlBqF,KAAK0U,cAAcrZ,UAAU1B,OAAOqa,IACpChU,KAAK2U,mBAAkB,KACrB3U,KAAK+E,UACLlI,GAAQR,EAAS,KANjBQ,GAAQR,EAQZ,CACA,OAAA0I,GACO/E,KAAKwU,cAGVjU,GAAaC,IAAIR,KAAK4E,SAAUqP,IAChCjU,KAAK4E,SAASjL,SACdqG,KAAKwU,aAAc,EACrB,CAGA,WAAAE,GACE,IAAK1U,KAAK4E,SAAU,CAClB,MAAMgQ,EAAWvvB,SAASwvB,cAAc,OACxCD,EAAST,UAAYnU,KAAK6E,QAAQsP,UAC9BnU,KAAK6E,QAAQO,YACfwP,EAASvZ,UAAU5E,IApFD,QAsFpBuJ,KAAK4E,SAAWgQ,CAClB,CACA,OAAO5U,KAAK4E,QACd,CACA,iBAAAZ,CAAkBF,GAGhB,OADAA,EAAOuQ,YAAc3Z,GAAWoJ,EAAOuQ,aAChCvQ,CACT,CACA,OAAA2Q,GACE,GAAIzU,KAAKwU,YACP,OAEF,MAAMj1B,EAAUygB,KAAK0U,cACrB1U,KAAK6E,QAAQwP,YAAYS,OAAOv1B,GAChCghB,GAAac,GAAG9hB,EAAS00B,IAAiB,KACxCpX,GAAQmD,KAAK6E,QAAQuP,cAAc,IAErCpU,KAAKwU,aAAc,CACrB,CACA,iBAAAG,CAAkBtY,GAChBW,GAAuBX,EAAU2D,KAAK0U,cAAe1U,KAAK6E,QAAQO,WACpE,EAeF,MAEM2P,GAAc,gBACdC,GAAkB,UAAUD,KAC5BE,GAAoB,cAAcF,KAGlCG,GAAmB,WACnBC,GAAY,CAChBC,WAAW,EACXC,YAAa,MAETC,GAAgB,CACpBF,UAAW,UACXC,YAAa,WAOf,MAAME,WAAkB9R,GACtB,WAAAU,CAAYL,GACVa,QACA3E,KAAK6E,QAAU7E,KAAK6D,WAAWC,GAC/B9D,KAAKwV,WAAY,EACjBxV,KAAKyV,qBAAuB,IAC9B,CAGA,kBAAW/R,GACT,OAAOyR,EACT,CACA,sBAAWxR,GACT,OAAO2R,EACT,CACA,eAAW/Y,GACT,MArCW,WAsCb,CAGA,QAAAmZ,GACM1V,KAAKwV,YAGLxV,KAAK6E,QAAQuQ,WACfpV,KAAK6E,QAAQwQ,YAAY5C,QAE3BlS,GAAaC,IAAInb,SAAU0vB,IAC3BxU,GAAac,GAAGhc,SAAU2vB,IAAiB5V,GAASY,KAAK2V,eAAevW,KACxEmB,GAAac,GAAGhc,SAAU4vB,IAAmB7V,GAASY,KAAK4V,eAAexW,KAC1EY,KAAKwV,WAAY,EACnB,CACA,UAAAK,GACO7V,KAAKwV,YAGVxV,KAAKwV,WAAY,EACjBjV,GAAaC,IAAInb,SAAU0vB,IAC7B,CAGA,cAAAY,CAAevW,GACb,MAAM,YACJiW,GACErV,KAAK6E,QACT,GAAIzF,EAAM7S,SAAWlH,UAAY+Z,EAAM7S,SAAW8oB,GAAeA,EAAY7wB,SAAS4a,EAAM7S,QAC1F,OAEF,MAAM1L,EAAWglB,GAAeU,kBAAkB8O,GAC1B,IAApBx0B,EAAS6P,OACX2kB,EAAY5C,QACHzS,KAAKyV,uBAAyBP,GACvCr0B,EAASA,EAAS6P,OAAS,GAAG+hB,QAE9B5xB,EAAS,GAAG4xB,OAEhB,CACA,cAAAmD,CAAexW,GAzED,QA0ERA,EAAMtiB,MAGVkjB,KAAKyV,qBAAuBrW,EAAM0W,SAAWZ,GA5EzB,UA6EtB,EAeF,MAAMa,GAAyB,oDACzBC,GAA0B,cAC1BC,GAAmB,gBACnBC,GAAkB,eAMxB,MAAMC,GACJ,WAAAhS,GACEnE,KAAK4E,SAAWvf,SAAS6G,IAC3B,CAGA,QAAAkqB,GAEE,MAAMC,EAAgBhxB,SAASC,gBAAgBuC,YAC/C,OAAO1F,KAAKoC,IAAI3E,OAAO02B,WAAaD,EACtC,CACA,IAAAzG,GACE,MAAM/rB,EAAQmc,KAAKoW,WACnBpW,KAAKuW,mBAELvW,KAAKwW,sBAAsBxW,KAAK4E,SAAUqR,IAAkBQ,GAAmBA,EAAkB5yB,IAEjGmc,KAAKwW,
sBAAsBT,GAAwBE,IAAkBQ,GAAmBA,EAAkB5yB,IAC1Gmc,KAAKwW,sBAAsBR,GAAyBE,IAAiBO,GAAmBA,EAAkB5yB,GAC5G,CACA,KAAAwO,GACE2N,KAAK0W,wBAAwB1W,KAAK4E,SAAU,YAC5C5E,KAAK0W,wBAAwB1W,KAAK4E,SAAUqR,IAC5CjW,KAAK0W,wBAAwBX,GAAwBE,IACrDjW,KAAK0W,wBAAwBV,GAAyBE,GACxD,CACA,aAAAS,GACE,OAAO3W,KAAKoW,WAAa,CAC3B,CAGA,gBAAAG,GACEvW,KAAK4W,sBAAsB5W,KAAK4E,SAAU,YAC1C5E,KAAK4E,SAAS7jB,MAAM+K,SAAW,QACjC,CACA,qBAAA0qB,CAAsBzc,EAAU8c,EAAexa,GAC7C,MAAMya,EAAiB9W,KAAKoW,WAS5BpW,KAAK+W,2BAA2Bhd,GARHxa,IAC3B,GAAIA,IAAYygB,KAAK4E,UAAYhlB,OAAO02B,WAAa/2B,EAAQsI,YAAcivB,EACzE,OAEF9W,KAAK4W,sBAAsBr3B,EAASs3B,GACpC,MAAMJ,EAAkB72B,OAAOqF,iBAAiB1F,GAASub,iBAAiB+b,GAC1Et3B,EAAQwB,MAAMi2B,YAAYH,EAAe,GAAGxa,EAASkB,OAAOC,WAAWiZ,QAAsB,GAGjG,CACA,qBAAAG,CAAsBr3B,EAASs3B,GAC7B,MAAMI,EAAc13B,EAAQwB,MAAM+Z,iBAAiB+b,GAC/CI,GACFjU,GAAYC,iBAAiB1jB,EAASs3B,EAAeI,EAEzD,CACA,uBAAAP,CAAwB3c,EAAU8c,GAWhC7W,KAAK+W,2BAA2Bhd,GAVHxa,IAC3B,MAAM5B,EAAQqlB,GAAYQ,iBAAiBjkB,EAASs3B,GAEtC,OAAVl5B,GAIJqlB,GAAYE,oBAAoB3jB,EAASs3B,GACzCt3B,EAAQwB,MAAMi2B,YAAYH,EAAel5B,IAJvC4B,EAAQwB,MAAMm2B,eAAeL,EAIgB,GAGnD,CACA,0BAAAE,CAA2Bhd,EAAUod,GACnC,GAAI,GAAUpd,GACZod,EAASpd,QAGX,IAAK,MAAM6L,KAAOC,GAAe1T,KAAK4H,EAAUiG,KAAK4E,UACnDuS,EAASvR,EAEb,EAeF,MAEMwR,GAAc,YAGdC,GAAe,OAAOD,KACtBE,GAAyB,gBAAgBF,KACzCG,GAAiB,SAASH,KAC1BI,GAAe,OAAOJ,KACtBK,GAAgB,QAAQL,KACxBM,GAAiB,SAASN,KAC1BO,GAAsB,gBAAgBP,KACtCQ,GAA0B,oBAAoBR,KAC9CS,GAA0B,kBAAkBT,KAC5CU,GAAyB,QAAQV,cACjCW,GAAkB,aAElBC,GAAoB,OACpBC,GAAoB,eAKpBC,GAAY,CAChBtD,UAAU,EACVnC,OAAO,EACPzH,UAAU,GAENmN,GAAgB,CACpBvD,SAAU,mBACVnC,MAAO,UACPzH,SAAU,WAOZ,MAAMoN,WAAc1T,GAClB,WAAAP,CAAY5kB,EAASukB,GACnBa,MAAMplB,EAASukB,GACf9D,KAAKqY,QAAUxS,GAAeC,QArBV,gBAqBmC9F,KAAK4E,UAC5D5E,KAAKsY,UAAYtY,KAAKuY,sBACtBvY,KAAKwY,WAAaxY,KAAKyY,uBACvBzY,KAAK2P,UAAW,EAChB3P,KAAKmP,kBAAmB,EACxBnP,KAAK0Y,WAAa,IAAIvC,GACtBnW,KAAK6L,oBACP,CAGA,kBAAWnI,GACT,OAAOwU,EACT,CACA,sBAAWvU,GACT,OAAOwU,EACT,CACA,eAAW5b,GACT,MA1DW,OA2Db,CAGA,MAAAoL,CAAO7H,GACL,OAAOE,KAAK2P,SAAW3P,KAAK4P,OAAS5P,KAAK6P,KAAK/P,EACjD,CACA,IAAA+P,CAAK/P,GACCE,KAAK2P,UAAY3P,KAAKmP,kBAGR5O,GAAaqB,QAAQ5B,KAAK4E,SAAU4S,GAAc,CAClE1X,kBAEYkC,mBAGdhC,KAAK2P,UAAW,EAChB3P,KAAKmP,kBAAmB,EACxBnP,KAAK0Y,WAAW9I,OAChBvqB,SAAS6G,KAAKmP,UAAU5E,IAAIshB,IAC5B/X,KAAK2Y,gBACL3Y,KAAKsY,UAAUzI,MAAK,IAAM7P,KAAK4Y,aAAa9Y,KAC9C,CACA,IAAA8P,GACO5P,KAAK2P,WAAY3P,KAAKmP,mBAGT5O,GAAaqB,QAAQ5B,KAAK4E,SAAUyS,IACxCrV,mBAGdhC,KAAK2P,UAAW,EAChB3P,KAAKmP,kBAAmB,EACxBnP,KAAKwY,WAAW3C,aAChB7V,KAAK4E,SAASvJ,UAAU1B,OAAOqe,IAC/BhY,KAAKmF,gBAAe,IAAMnF,KAAK6Y,cAAc7Y,KAAK4E,SAAU5E,KAAKgO,gBACnE,CACA,OAAAjJ,GACExE,GAAaC,IAAI5gB,OAAQw3B,IACzB7W,GAAaC,IAAIR,KAAKqY,QAASjB,IAC/BpX,KAAKsY,UAAUvT,UACf/E,KAAKwY,WAAW3C,aAChBlR,MAAMI,SACR,CACA,YAAA+T,GACE9Y,KAAK2Y,eACP,CAGA,mBAAAJ,GACE,OAAO,IAAIhE,GAAS,CAClB5Z,UAAWmG,QAAQd,KAAK6E,QAAQ+P,UAEhCxP,WAAYpF,KAAKgO,eAErB,CACA,oBAAAyK,GACE,OAAO,IAAIlD,GAAU,CACnBF,YAAarV,KAAK4E,UAEtB,CACA,YAAAgU,CAAa9Y,GAENza,SAAS6G,KAAK1H,SAASwb,KAAK4E,WAC/Bvf,SAAS6G,KAAK4oB,OAAO9U,KAAK4E,UAE5B5E,KAAK4E,SAAS7jB,MAAMgxB,QAAU,QAC9B/R,KAAK4E,SAASzjB,gBAAgB,eAC9B6e,KAAK4E,SAASxjB,aAAa,cAAc,GACzC4e,KAAK4E,SAASxjB,aAAa,OAAQ,UACnC4e,KAAK4E,SAASnZ,UAAY,EAC1B,MAAMstB,EAAYlT,GAAeC,QA7GT,cA6GsC9F,KAAKqY,SAC/DU,IACFA,EAAUttB,UAAY,GAExBoQ,GAAOmE,KAAK4E,UACZ5E,KAAK4E,SAASvJ,UAAU5E,IAAIuhB,IAU5BhY,KAAKmF,gBATsB,KACrBnF,KAAK6E,QAAQ4N,OACfzS,KAAKwY,WAAW9C,WAElB1V,KAAKmP,kBAAmB,EACxB5O,GAAaqB,QAAQ5B,KAAK4E,SAAU6S,GAAe,CACjD3X,iBACA,GAEoCE,KAAKqY,QAASrY,KAAKgO,cAC7D,CACA,kBAAAnC,GACEtL,GAAac,GAAGrB,KAAK4E,SAAUiT,IAAyBzY,IAhJvC,WAiJXA,EAAMtiB,MAGNkjB,KAAK6E,QAAQmG,SACfhL,KAAK4P,OAGP5P,KAAKgZ,6BAA4B,IAEnCzY,GAAac,GAAGzhB,OAAQ83B,IAAgB,
KAClC1X,KAAK2P,WAAa3P,KAAKmP,kBACzBnP,KAAK2Y,eACP,IAEFpY,GAAac,GAAGrB,KAAK4E,SAAUgT,IAAyBxY,IAEtDmB,GAAae,IAAItB,KAAK4E,SAAU+S,IAAqBsB,IAC/CjZ,KAAK4E,WAAaxF,EAAM7S,QAAUyT,KAAK4E,WAAaqU,EAAO1sB,SAGjC,WAA1ByT,KAAK6E,QAAQ+P,SAIb5U,KAAK6E,QAAQ+P,UACf5U,KAAK4P,OAJL5P,KAAKgZ,6BAKP,GACA,GAEN,CACA,UAAAH,GACE7Y,KAAK4E,SAAS7jB,MAAMgxB,QAAU,OAC9B/R,KAAK4E,SAASxjB,aAAa,eAAe,GAC1C4e,KAAK4E,SAASzjB,gBAAgB,cAC9B6e,KAAK4E,SAASzjB,gBAAgB,QAC9B6e,KAAKmP,kBAAmB,EACxBnP,KAAKsY,UAAU1I,MAAK,KAClBvqB,SAAS6G,KAAKmP,UAAU1B,OAAOoe,IAC/B/X,KAAKkZ,oBACLlZ,KAAK0Y,WAAWrmB,QAChBkO,GAAaqB,QAAQ5B,KAAK4E,SAAU2S,GAAe,GAEvD,CACA,WAAAvJ,GACE,OAAOhO,KAAK4E,SAASvJ,UAAU7W,SAjLT,OAkLxB,CACA,0BAAAw0B,GAEE,GADkBzY,GAAaqB,QAAQ5B,KAAK4E,SAAU0S,IACxCtV,iBACZ,OAEF,MAAMmX,EAAqBnZ,KAAK4E,SAASvX,aAAehI,SAASC,gBAAgBsC,aAC3EwxB,EAAmBpZ,KAAK4E,SAAS7jB,MAAMiL,UAEpB,WAArBotB,GAAiCpZ,KAAK4E,SAASvJ,UAAU7W,SAASyzB,MAGjEkB,IACHnZ,KAAK4E,SAAS7jB,MAAMiL,UAAY,UAElCgU,KAAK4E,SAASvJ,UAAU5E,IAAIwhB,IAC5BjY,KAAKmF,gBAAe,KAClBnF,KAAK4E,SAASvJ,UAAU1B,OAAOse,IAC/BjY,KAAKmF,gBAAe,KAClBnF,KAAK4E,SAAS7jB,MAAMiL,UAAYotB,CAAgB,GAC/CpZ,KAAKqY,QAAQ,GACfrY,KAAKqY,SACRrY,KAAK4E,SAAS6N,QAChB,CAMA,aAAAkG,GACE,MAAMQ,EAAqBnZ,KAAK4E,SAASvX,aAAehI,SAASC,gBAAgBsC,aAC3EkvB,EAAiB9W,KAAK0Y,WAAWtC,WACjCiD,EAAoBvC,EAAiB,EAC3C,GAAIuC,IAAsBF,EAAoB,CAC5C,MAAMr3B,EAAWma,KAAU,cAAgB,eAC3C+D,KAAK4E,SAAS7jB,MAAMe,GAAY,GAAGg1B,KACrC,CACA,IAAKuC,GAAqBF,EAAoB,CAC5C,MAAMr3B,EAAWma,KAAU,eAAiB,cAC5C+D,KAAK4E,SAAS7jB,MAAMe,GAAY,GAAGg1B,KACrC,CACF,CACA,iBAAAoC,GACElZ,KAAK4E,SAAS7jB,MAAMu4B,YAAc,GAClCtZ,KAAK4E,SAAS7jB,MAAMw4B,aAAe,EACrC,CAGA,sBAAO9c,CAAgBqH,EAAQhE,GAC7B,OAAOE,KAAKwH,MAAK,WACf,MAAMnd,EAAO+tB,GAAM9S,oBAAoBtF,KAAM8D,GAC7C,GAAsB,iBAAXA,EAAX,CAGA,QAA4B,IAAjBzZ,EAAKyZ,GACd,MAAM,IAAIU,UAAU,oBAAoBV,MAE1CzZ,EAAKyZ,GAAQhE,EAJb,CAKF,GACF,EAOFS,GAAac,GAAGhc,SAAUyyB,GA9OK,4BA8O2C,SAAU1Y,GAClF,MAAM7S,EAASsZ,GAAec,uBAAuB3G,MACjD,CAAC,IAAK,QAAQoB,SAASpB,KAAKiH,UAC9B7H,EAAMkD,iBAER/B,GAAae,IAAI/U,EAAQirB,IAAcgC,IACjCA,EAAUxX,kBAIdzB,GAAae,IAAI/U,EAAQgrB,IAAgB,KACnC5c,GAAUqF,OACZA,KAAKyS,OACP,GACA,IAIJ,MAAMgH,EAAc5T,GAAeC,QAnQb,eAoQlB2T,GACFrB,GAAM/S,YAAYoU,GAAa7J,OAEpBwI,GAAM9S,oBAAoB/Y,GAClCob,OAAO3H,KACd,IACA6G,GAAqBuR,IAMrBjc,GAAmBic,IAcnB,MAEMsB,GAAc,gBACdC,GAAiB,YACjBC,GAAwB,OAAOF,KAAcC,KAE7CE,GAAoB,OACpBC,GAAuB,UACvBC,GAAoB,SAEpBC,GAAgB,kBAChBC,GAAe,OAAOP,KACtBQ,GAAgB,QAAQR,KACxBS,GAAe,OAAOT,KACtBU,GAAuB,gBAAgBV,KACvCW,GAAiB,SAASX,KAC1BY,GAAe,SAASZ,KACxBa,GAAyB,QAAQb,KAAcC,KAC/Ca,GAAwB,kBAAkBd,KAE1Ce,GAAY,CAChB7F,UAAU,EACV5J,UAAU,EACVvgB,QAAQ,GAEJiwB,GAAgB,CACpB9F,SAAU,mBACV5J,SAAU,UACVvgB,OAAQ,WAOV,MAAMkwB,WAAkBjW,GACtB,WAAAP,CAAY5kB,EAASukB,GACnBa,MAAMplB,EAASukB,GACf9D,KAAK2P,UAAW,EAChB3P,KAAKsY,UAAYtY,KAAKuY,sBACtBvY,KAAKwY,WAAaxY,KAAKyY,uBACvBzY,KAAK6L,oBACP,CAGA,kBAAWnI,GACT,OAAO+W,EACT,CACA,sBAAW9W,GACT,OAAO+W,EACT,CACA,eAAWne,GACT,MApDW,WAqDb,CAGA,MAAAoL,CAAO7H,GACL,OAAOE,KAAK2P,SAAW3P,KAAK4P,OAAS5P,KAAK6P,KAAK/P,EACjD,CACA,IAAA+P,CAAK/P,GACCE,KAAK2P,UAGSpP,GAAaqB,QAAQ5B,KAAK4E,SAAUqV,GAAc,CAClEna,kBAEYkC,mBAGdhC,KAAK2P,UAAW,EAChB3P,KAAKsY,UAAUzI,OACV7P,KAAK6E,QAAQpa,SAChB,IAAI0rB,IAAkBvG,OAExB5P,KAAK4E,SAASxjB,aAAa,cAAc,GACzC4e,KAAK4E,SAASxjB,aAAa,OAAQ,UACnC4e,KAAK4E,SAASvJ,UAAU5E,IAAIqjB,IAW5B9Z,KAAKmF,gBAVoB,KAClBnF,KAAK6E,QAAQpa,SAAUuV,KAAK6E,QAAQ+P,UACvC5U,KAAKwY,WAAW9C,WAElB1V,KAAK4E,SAASvJ,UAAU5E,IAAIojB,IAC5B7Z,KAAK4E,SAASvJ,UAAU1B,OAAOmgB,IAC/BvZ,GAAaqB,QAAQ5B,KAAK4E,SAAUsV,GAAe,CACjDpa,iBACA,GAEkCE,KAAK4E,UAAU,GACvD,CACA,IAAAgL,GACO5P,KAAK2P,WAGQpP,GAAaqB,QAAQ5B,KAAK4E,SAAUuV,IACxCnY,mBAGdhC,KAAKwY,WAAW3C,aAChB7V,KAAK4E,SAASgW,OACd5a,KAAK2P,UAAW,EAChB3P,KAAK4E,SAASvJ,UAAU
5E,IAAIsjB,IAC5B/Z,KAAKsY,UAAU1I,OAUf5P,KAAKmF,gBAToB,KACvBnF,KAAK4E,SAASvJ,UAAU1B,OAAOkgB,GAAmBE,IAClD/Z,KAAK4E,SAASzjB,gBAAgB,cAC9B6e,KAAK4E,SAASzjB,gBAAgB,QACzB6e,KAAK6E,QAAQpa,SAChB,IAAI0rB,IAAkB9jB,QAExBkO,GAAaqB,QAAQ5B,KAAK4E,SAAUyV,GAAe,GAEfra,KAAK4E,UAAU,IACvD,CACA,OAAAG,GACE/E,KAAKsY,UAAUvT,UACf/E,KAAKwY,WAAW3C,aAChBlR,MAAMI,SACR,CAGA,mBAAAwT,GACE,MASM5d,EAAYmG,QAAQd,KAAK6E,QAAQ+P,UACvC,OAAO,IAAIL,GAAS,CAClBJ,UA3HsB,qBA4HtBxZ,YACAyK,YAAY,EACZiP,YAAarU,KAAK4E,SAAS7f,WAC3BqvB,cAAezZ,EAfK,KACU,WAA1BqF,KAAK6E,QAAQ+P,SAIjB5U,KAAK4P,OAHHrP,GAAaqB,QAAQ5B,KAAK4E,SAAUwV,GAG3B,EAUgC,MAE/C,CACA,oBAAA3B,GACE,OAAO,IAAIlD,GAAU,CACnBF,YAAarV,KAAK4E,UAEtB,CACA,kBAAAiH,GACEtL,GAAac,GAAGrB,KAAK4E,SAAU4V,IAAuBpb,IA5IvC,WA6ITA,EAAMtiB,MAGNkjB,KAAK6E,QAAQmG,SACfhL,KAAK4P,OAGPrP,GAAaqB,QAAQ5B,KAAK4E,SAAUwV,IAAqB,GAE7D,CAGA,sBAAO3d,CAAgBqH,GACrB,OAAO9D,KAAKwH,MAAK,WACf,MAAMnd,EAAOswB,GAAUrV,oBAAoBtF,KAAM8D,GACjD,GAAsB,iBAAXA,EAAX,CAGA,QAAqB/K,IAAjB1O,EAAKyZ,IAAyBA,EAAOrC,WAAW,MAAmB,gBAAXqC,EAC1D,MAAM,IAAIU,UAAU,oBAAoBV,MAE1CzZ,EAAKyZ,GAAQ9D,KAJb,CAKF,GACF,EAOFO,GAAac,GAAGhc,SAAUk1B,GA7JK,gCA6J2C,SAAUnb,GAClF,MAAM7S,EAASsZ,GAAec,uBAAuB3G,MAIrD,GAHI,CAAC,IAAK,QAAQoB,SAASpB,KAAKiH,UAC9B7H,EAAMkD,iBAEJpH,GAAW8E,MACb,OAEFO,GAAae,IAAI/U,EAAQ8tB,IAAgB,KAEnC1f,GAAUqF,OACZA,KAAKyS,OACP,IAIF,MAAMgH,EAAc5T,GAAeC,QAAQkU,IACvCP,GAAeA,IAAgBltB,GACjCouB,GAAUtV,YAAYoU,GAAa7J,OAExB+K,GAAUrV,oBAAoB/Y,GACtCob,OAAO3H,KACd,IACAO,GAAac,GAAGzhB,OAAQg6B,IAAuB,KAC7C,IAAK,MAAM7f,KAAY8L,GAAe1T,KAAK6nB,IACzCW,GAAUrV,oBAAoBvL,GAAU8V,MAC1C,IAEFtP,GAAac,GAAGzhB,OAAQ06B,IAAc,KACpC,IAAK,MAAM/6B,KAAWsmB,GAAe1T,KAAK,gDACG,UAAvClN,iBAAiB1F,GAASiC,UAC5Bm5B,GAAUrV,oBAAoB/lB,GAASqwB,MAE3C,IAEF/I,GAAqB8T,IAMrBxe,GAAmBwe,IAUnB,MACME,GAAmB,CAEvB,IAAK,CAAC,QAAS,MAAO,KAAM,OAAQ,OAHP,kBAI7BhqB,EAAG,CAAC,SAAU,OAAQ,QAAS,OAC/BiqB,KAAM,GACNhqB,EAAG,GACHiqB,GAAI,GACJC,IAAK,GACLC,KAAM,GACNC,GAAI,GACJC,IAAK,GACLC,GAAI,GACJC,GAAI,GACJC,GAAI,GACJC,GAAI,GACJC,GAAI,GACJC,GAAI,GACJC,GAAI,GACJC,GAAI,GACJC,GAAI,GACJC,GAAI,GACJxqB,EAAG,GACH0b,IAAK,CAAC,MAAO,SAAU,MAAO,QAAS,QAAS,UAChD+O,GAAI,GACJC,GAAI,GACJC,EAAG,GACHC,IAAK,GACLC,EAAG,GACHC,MAAO,GACPC,KAAM,GACNC,IAAK,GACLC,IAAK,GACLC,OAAQ,GACRC,EAAG,GACHC,GAAI,IAIAC,GAAgB,IAAIpmB,IAAI,CAAC,aAAc,OAAQ,OAAQ,WAAY,WAAY,SAAU,MAAO,eAShGqmB,GAAmB,0DACnBC,GAAmB,CAAC76B,EAAW86B,KACnC,MAAMC,EAAgB/6B,EAAUvC,SAASC,cACzC,OAAIo9B,EAAqBzb,SAAS0b,IAC5BJ,GAAc/lB,IAAImmB,IACbhc,QAAQ6b,GAAiBt5B,KAAKtB,EAAUg7B,YAM5CF,EAAqB12B,QAAO62B,GAAkBA,aAA0BzY,SAAQ9R,MAAKwqB,GAASA,EAAM55B,KAAKy5B,IAAe,EA0C3HI,GAAY,CAChBC,UAAWtC,GACXuC,QAAS,CAAC,EAEVC,WAAY,GACZxwB,MAAM,EACNywB,UAAU,EACVC,WAAY,KACZC,SAAU,eAENC,GAAgB,CACpBN,UAAW,SACXC,QAAS,SACTC,WAAY,oBACZxwB,KAAM,UACNywB,SAAU,UACVC,WAAY,kBACZC,SAAU,UAENE,GAAqB,CACzBC,MAAO,iCACP5jB,SAAU,oBAOZ,MAAM6jB,WAAwBna,GAC5B,WAAAU,CAAYL,GACVa,QACA3E,KAAK6E,QAAU7E,KAAK6D,WAAWC,EACjC,CAGA,kBAAWJ,GACT,OAAOwZ,EACT,CACA,sBAAWvZ,GACT,OAAO8Z,EACT,CACA,eAAWlhB,GACT,MA3CW,iBA4Cb,CAGA,UAAAshB,GACE,OAAO7gC,OAAOmiB,OAAOa,KAAK6E,QAAQuY,SAASt6B,KAAIghB,GAAU9D,KAAK8d,yBAAyBha,KAAS3d,OAAO2a,QACzG,CACA,UAAAid,GACE,OAAO/d,KAAK6d,aAAantB,OAAS,CACpC,CACA,aAAAstB,CAAcZ,GAMZ,OALApd,KAAKie,cAAcb,GACnBpd,KAAK6E,QAAQuY,QAAU,IAClBpd,KAAK6E,QAAQuY,WACbA,GAEEpd,IACT,CACA,MAAAke,GACE,MAAMC,EAAkB94B,SAASwvB,cAAc,OAC/CsJ,EAAgBC,UAAYpe,KAAKqe,eAAere,KAAK6E,QAAQ2Y,UAC7D,IAAK,MAAOzjB,EAAUukB,KAASthC,OAAOmkB,QAAQnB,KAAK6E,QAAQuY,SACzDpd,KAAKue,YAAYJ,EAAiBG,EAAMvkB,GAE1C,MAAMyjB,EAAWW,EAAgBpY,SAAS,GACpCsX,EAAard,KAAK8d,yBAAyB9d,KAAK6E,QAAQwY,YAI9D,OAHIA,GACFG,EAASniB,UAAU5E,OAAO4mB,EAAWn7B,MAAM,MAEtCs7B,CACT,CAGA,gBAAAvZ,CAAiBH,GACfa,MAAMV,iBAAiBH
,GACvB9D,KAAKie,cAAcna,EAAOsZ,QAC5B,CACA,aAAAa,CAAcO,GACZ,IAAK,MAAOzkB,EAAUqjB,KAAYpgC,OAAOmkB,QAAQqd,GAC/C7Z,MAAMV,iBAAiB,CACrBlK,WACA4jB,MAAOP,GACNM,GAEP,CACA,WAAAa,CAAYf,EAAUJ,EAASrjB,GAC7B,MAAM0kB,EAAkB5Y,GAAeC,QAAQ/L,EAAUyjB,GACpDiB,KAGLrB,EAAUpd,KAAK8d,yBAAyBV,IAKpC,GAAUA,GACZpd,KAAK0e,sBAAsBhkB,GAAW0iB,GAAUqB,GAG9Cze,KAAK6E,QAAQhY,KACf4xB,EAAgBL,UAAYpe,KAAKqe,eAAejB,GAGlDqB,EAAgBE,YAAcvB,EAX5BqB,EAAgB9kB,SAYpB,CACA,cAAA0kB,CAAeG,GACb,OAAOxe,KAAK6E,QAAQyY,SApJxB,SAAsBsB,EAAYzB,EAAW0B,GAC3C,IAAKD,EAAWluB,OACd,OAAOkuB,EAET,GAAIC,GAAgD,mBAArBA,EAC7B,OAAOA,EAAiBD,GAE1B,MACME,GADY,IAAIl/B,OAAOm/B,WACKC,gBAAgBJ,EAAY,aACxD/9B,EAAW,GAAGlC,UAAUmgC,EAAgB5yB,KAAKkU,iBAAiB,MACpE,IAAK,MAAM7gB,KAAWsB,EAAU,CAC9B,MAAMo+B,EAAc1/B,EAAQC,SAASC,cACrC,IAAKzC,OAAO4D,KAAKu8B,GAAW/b,SAAS6d,GAAc,CACjD1/B,EAAQoa,SACR,QACF,CACA,MAAMulB,EAAgB,GAAGvgC,UAAUY,EAAQ0B,YACrCk+B,EAAoB,GAAGxgC,OAAOw+B,EAAU,MAAQ,GAAIA,EAAU8B,IAAgB,IACpF,IAAK,MAAMl9B,KAAam9B,EACjBtC,GAAiB76B,EAAWo9B,IAC/B5/B,EAAQ4B,gBAAgBY,EAAUvC,SAGxC,CACA,OAAOs/B,EAAgB5yB,KAAKkyB,SAC9B,CA2HmCgB,CAAaZ,EAAKxe,KAAK6E,QAAQsY,UAAWnd,KAAK6E,QAAQ0Y,YAAciB,CACtG,CACA,wBAAAV,CAAyBU,GACvB,OAAO3hB,GAAQ2hB,EAAK,CAACxe,MACvB,CACA,qBAAA0e,CAAsBn/B,EAASk/B,GAC7B,GAAIze,KAAK6E,QAAQhY,KAGf,OAFA4xB,EAAgBL,UAAY,QAC5BK,EAAgB3J,OAAOv1B,GAGzBk/B,EAAgBE,YAAcp/B,EAAQo/B,WACxC,EAeF,MACMU,GAAwB,IAAI/oB,IAAI,CAAC,WAAY,YAAa,eAC1DgpB,GAAoB,OAEpBC,GAAoB,OACpBC,GAAyB,iBACzBC,GAAiB,SACjBC,GAAmB,gBACnBC,GAAgB,QAChBC,GAAgB,QAahBC,GAAgB,CACpBC,KAAM,OACNC,IAAK,MACLC,MAAO/jB,KAAU,OAAS,QAC1BgkB,OAAQ,SACRC,KAAMjkB,KAAU,QAAU,QAEtBkkB,GAAY,CAChBhD,UAAWtC,GACXuF,WAAW,EACXnyB,SAAU,kBACVoyB,WAAW,EACXC,YAAa,GACbC,MAAO,EACPvwB,mBAAoB,CAAC,MAAO,QAAS,SAAU,QAC/CnD,MAAM,EACN7E,OAAQ,CAAC,EAAG,GACZtJ,UAAW,MACXszB,aAAc,KACdsL,UAAU,EACVC,WAAY,KACZxjB,UAAU,EACVyjB,SAAU,+GACVgD,MAAO,GACP5e,QAAS,eAEL6e,GAAgB,CACpBtD,UAAW,SACXiD,UAAW,UACXnyB,SAAU,mBACVoyB,UAAW,2BACXC,YAAa,oBACbC,MAAO,kBACPvwB,mBAAoB,QACpBnD,KAAM,UACN7E,OAAQ,0BACRtJ,UAAW,oBACXszB,aAAc,yBACdsL,SAAU,UACVC,WAAY,kBACZxjB,SAAU,mBACVyjB,SAAU,SACVgD,MAAO,4BACP5e,QAAS,UAOX,MAAM8e,WAAgBhc,GACpB,WAAAP,CAAY5kB,EAASukB,GACnB,QAAsB,IAAX,EACT,MAAM,IAAIU,UAAU,+DAEtBG,MAAMplB,EAASukB,GAGf9D,KAAK2gB,YAAa,EAClB3gB,KAAK4gB,SAAW,EAChB5gB,KAAK6gB,WAAa,KAClB7gB,KAAK8gB,eAAiB,CAAC,EACvB9gB,KAAKmS,QAAU,KACfnS,KAAK+gB,iBAAmB,KACxB/gB,KAAKghB,YAAc,KAGnBhhB,KAAKihB,IAAM,KACXjhB,KAAKkhB,gBACAlhB,KAAK6E,QAAQ9K,UAChBiG,KAAKmhB,WAET,CAGA,kBAAWzd,GACT,OAAOyc,EACT,CACA,sBAAWxc,GACT,OAAO8c,EACT,CACA,eAAWlkB,GACT,MAxGW,SAyGb,CAGA,MAAA6kB,GACEphB,KAAK2gB,YAAa,CACpB,CACA,OAAAU,GACErhB,KAAK2gB,YAAa,CACpB,CACA,aAAAW,GACEthB,KAAK2gB,YAAc3gB,KAAK2gB,UAC1B,CACA,MAAAhZ,GACO3H,KAAK2gB,aAGV3gB,KAAK8gB,eAAeS,OAASvhB,KAAK8gB,eAAeS,MAC7CvhB,KAAK2P,WACP3P,KAAKwhB,SAGPxhB,KAAKyhB,SACP,CACA,OAAA1c,GACEmI,aAAalN,KAAK4gB,UAClBrgB,GAAaC,IAAIR,KAAK4E,SAAS5J,QAAQykB,IAAiBC,GAAkB1f,KAAK0hB,mBAC3E1hB,KAAK4E,SAASpJ,aAAa,2BAC7BwE,KAAK4E,SAASxjB,aAAa,QAAS4e,KAAK4E,SAASpJ,aAAa,2BAEjEwE,KAAK2hB,iBACLhd,MAAMI,SACR,CACA,IAAA8K,GACE,GAAoC,SAAhC7P,KAAK4E,SAAS7jB,MAAMgxB,QACtB,MAAM,IAAInO,MAAM,uCAElB,IAAM5D,KAAK4hB,mBAAoB5hB,KAAK2gB,WAClC,OAEF,MAAMnH,EAAYjZ,GAAaqB,QAAQ5B,KAAK4E,SAAU5E,KAAKmE,YAAYqB,UAlItD,SAoIXqc,GADapmB,GAAeuE,KAAK4E,WACL5E,KAAK4E,SAAS9kB,cAAcwF,iBAAiBd,SAASwb,KAAK4E,UAC7F,GAAI4U,EAAUxX,mBAAqB6f,EACjC,OAIF7hB,KAAK2hB,iBACL,MAAMV,EAAMjhB,KAAK8hB,iBACjB9hB,KAAK4E,SAASxjB,aAAa,mBAAoB6/B,EAAIzlB,aAAa,OAChE,MAAM,UACJ6kB,GACErgB,KAAK6E,QAYT,GAXK7E,KAAK4E,SAAS9kB,cAAcwF,gBAAgBd,SAASwb,KAAKihB,OAC7DZ,EAAUvL,OAAOmM,GACjB1gB,GAAaqB,QAAQ5B,KAAK4E,SAAU5E,KAAKmE,YAAYqB,UAhJpC,cAkJnBxF,KAAKmS,QAAUnS,KAAKwS,
cAAcyO,GAClCA,EAAI5lB,UAAU5E,IAAI8oB,IAMd,iBAAkBl6B,SAASC,gBAC7B,IAAK,MAAM/F,IAAW,GAAGZ,UAAU0G,SAAS6G,KAAK6Z,UAC/CxF,GAAac,GAAG9hB,EAAS,YAAaqc,IAU1CoE,KAAKmF,gBAPY,KACf5E,GAAaqB,QAAQ5B,KAAK4E,SAAU5E,KAAKmE,YAAYqB,UAhKrC,WAiKQ,IAApBxF,KAAK6gB,YACP7gB,KAAKwhB,SAEPxhB,KAAK6gB,YAAa,CAAK,GAEK7gB,KAAKihB,IAAKjhB,KAAKgO,cAC/C,CACA,IAAA4B,GACE,GAAK5P,KAAK2P,aAGQpP,GAAaqB,QAAQ5B,KAAK4E,SAAU5E,KAAKmE,YAAYqB,UA/KtD,SAgLHxD,iBAAd,CAQA,GALYhC,KAAK8hB,iBACbzmB,UAAU1B,OAAO4lB,IAIjB,iBAAkBl6B,SAASC,gBAC7B,IAAK,MAAM/F,IAAW,GAAGZ,UAAU0G,SAAS6G,KAAK6Z,UAC/CxF,GAAaC,IAAIjhB,EAAS,YAAaqc,IAG3CoE,KAAK8gB,eAA4B,OAAI,EACrC9gB,KAAK8gB,eAAelB,KAAiB,EACrC5f,KAAK8gB,eAAenB,KAAiB,EACrC3f,KAAK6gB,WAAa,KAYlB7gB,KAAKmF,gBAVY,KACXnF,KAAK+hB,yBAGJ/hB,KAAK6gB,YACR7gB,KAAK2hB,iBAEP3hB,KAAK4E,SAASzjB,gBAAgB,oBAC9Bof,GAAaqB,QAAQ5B,KAAK4E,SAAU5E,KAAKmE,YAAYqB,UAzMpC,WAyM8D,GAEnDxF,KAAKihB,IAAKjhB,KAAKgO,cA1B7C,CA2BF,CACA,MAAAjjB,GACMiV,KAAKmS,SACPnS,KAAKmS,QAAQpnB,QAEjB,CAGA,cAAA62B,GACE,OAAO9gB,QAAQd,KAAKgiB,YACtB,CACA,cAAAF,GAIE,OAHK9hB,KAAKihB,MACRjhB,KAAKihB,IAAMjhB,KAAKiiB,kBAAkBjiB,KAAKghB,aAAehhB,KAAKkiB,2BAEtDliB,KAAKihB,GACd,CACA,iBAAAgB,CAAkB7E,GAChB,MAAM6D,EAAMjhB,KAAKmiB,oBAAoB/E,GAASc,SAG9C,IAAK+C,EACH,OAAO,KAETA,EAAI5lB,UAAU1B,OAAO2lB,GAAmBC,IAExC0B,EAAI5lB,UAAU5E,IAAI,MAAMuJ,KAAKmE,YAAY5H,aACzC,MAAM6lB,EAvuGKC,KACb,GACEA,GAAUlgC,KAAKmgC,MA/BH,IA+BSngC,KAAKogC,gBACnBl9B,SAASm9B,eAAeH,IACjC,OAAOA,CAAM,EAmuGGI,CAAOziB,KAAKmE,YAAY5H,MAAM1c,WAK5C,OAJAohC,EAAI7/B,aAAa,KAAMghC,GACnBpiB,KAAKgO,eACPiT,EAAI5lB,UAAU5E,IAAI6oB,IAEb2B,CACT,CACA,UAAAyB,CAAWtF,GACTpd,KAAKghB,YAAc5D,EACfpd,KAAK2P,aACP3P,KAAK2hB,iBACL3hB,KAAK6P,OAET,CACA,mBAAAsS,CAAoB/E,GAYlB,OAXIpd,KAAK+gB,iBACP/gB,KAAK+gB,iBAAiB/C,cAAcZ,GAEpCpd,KAAK+gB,iBAAmB,IAAInD,GAAgB,IACvC5d,KAAK6E,QAGRuY,UACAC,WAAYrd,KAAK8d,yBAAyB9d,KAAK6E,QAAQyb,eAGpDtgB,KAAK+gB,gBACd,CACA,sBAAAmB,GACE,MAAO,CACL,CAAC1C,IAAyBxf,KAAKgiB,YAEnC,CACA,SAAAA,GACE,OAAOhiB,KAAK8d,yBAAyB9d,KAAK6E,QAAQ2b,QAAUxgB,KAAK4E,SAASpJ,aAAa,yBACzF,CAGA,4BAAAmnB,CAA6BvjB,GAC3B,OAAOY,KAAKmE,YAAYmB,oBAAoBlG,EAAMW,eAAgBC,KAAK4iB,qBACzE,CACA,WAAA5U,GACE,OAAOhO,KAAK6E,QAAQub,WAAapgB,KAAKihB,KAAOjhB,KAAKihB,IAAI5lB,UAAU7W,SAAS86B,GAC3E,CACA,QAAA3P,GACE,OAAO3P,KAAKihB,KAAOjhB,KAAKihB,IAAI5lB,UAAU7W,SAAS+6B,GACjD,CACA,aAAA/M,CAAcyO,GACZ,MAAMviC,EAAYme,GAAQmD,KAAK6E,QAAQnmB,UAAW,CAACshB,KAAMihB,EAAKjhB,KAAK4E,WAC7Die,EAAahD,GAAcnhC,EAAU+lB,eAC3C,OAAO,GAAoBzE,KAAK4E,SAAUqc,EAAKjhB,KAAK4S,iBAAiBiQ,GACvE,CACA,UAAA7P,GACE,MAAM,OACJhrB,GACEgY,KAAK6E,QACT,MAAsB,iBAAX7c,EACFA,EAAO9F,MAAM,KAAKY,KAAInF,GAAS4f,OAAOgQ,SAAS5vB,EAAO,MAEzC,mBAAXqK,EACFirB,GAAcjrB,EAAOirB,EAAYjT,KAAK4E,UAExC5c,CACT,CACA,wBAAA81B,CAAyBU,GACvB,OAAO3hB,GAAQ2hB,EAAK,CAACxe,KAAK4E,UAC5B,CACA,gBAAAgO,CAAiBiQ,GACf,MAAM3P,EAAwB,CAC5Bx0B,UAAWmkC,EACXzsB,UAAW,CAAC,CACV9V,KAAM,OACNmB,QAAS,CACPuO,mBAAoBgQ,KAAK6E,QAAQ7U,qBAElC,CACD1P,KAAM,SACNmB,QAAS,CACPuG,OAAQgY,KAAKgT,eAEd,CACD1yB,KAAM,kBACNmB,QAAS,CACPwM,SAAU+R,KAAK6E,QAAQ5W,WAExB,CACD3N,KAAM,QACNmB,QAAS,CACPlC,QAAS,IAAIygB,KAAKmE,YAAY5H,eAE/B,CACDjc,KAAM,kBACNC,SAAS,EACTC,MAAO,aACPC,GAAI4J,IAGF2V,KAAK8hB,iBAAiB1gC,aAAa,wBAAyBiJ,EAAK1J,MAAMjC,UAAU,KAIvF,MAAO,IACFw0B,KACArW,GAAQmD,KAAK6E,QAAQmN,aAAc,CAACkB,IAE3C,CACA,aAAAgO,GACE,MAAM4B,EAAW9iB,KAAK6E,QAAQjD,QAAQ1f,MAAM,KAC5C,IAAK,MAAM0f,KAAWkhB,EACpB,GAAgB,UAAZlhB,EACFrB,GAAac,GAAGrB,KAAK4E,SAAU5E,KAAKmE,YAAYqB,UAjVlC,SAiV4DxF,KAAK6E,QAAQ9K,UAAUqF,IAC/EY,KAAK2iB,6BAA6BvjB,GAC1CuI,QAAQ,SAEb,GA3VU,WA2VN/F,EAA4B,CACrC,MAAMmhB,EAAUnhB,IAAY+d,GAAgB3f,KAAKmE,YAAYqB,UAnV5C,cAmV0ExF,KAAKmE,YAAYqB,UArV5F,WAsVVwd,EAAWphB,IAAY+d,GAAgB3f,KAAKmE,YAAYqB,UAnV7C,cAmV2ExF,KAAKm
E,YAAYqB,UArV5F,YAsVjBjF,GAAac,GAAGrB,KAAK4E,SAAUme,EAAS/iB,KAAK6E,QAAQ9K,UAAUqF,IAC7D,MAAMkU,EAAUtT,KAAK2iB,6BAA6BvjB,GAClDkU,EAAQwN,eAA8B,YAAf1hB,EAAMqB,KAAqBmf,GAAgBD,KAAiB,EACnFrM,EAAQmO,QAAQ,IAElBlhB,GAAac,GAAGrB,KAAK4E,SAAUoe,EAAUhjB,KAAK6E,QAAQ9K,UAAUqF,IAC9D,MAAMkU,EAAUtT,KAAK2iB,6BAA6BvjB,GAClDkU,EAAQwN,eAA8B,aAAf1hB,EAAMqB,KAAsBmf,GAAgBD,IAAiBrM,EAAQ1O,SAASpgB,SAAS4a,EAAMU,eACpHwT,EAAQkO,QAAQ,GAEpB,CAEFxhB,KAAK0hB,kBAAoB,KACnB1hB,KAAK4E,UACP5E,KAAK4P,MACP,EAEFrP,GAAac,GAAGrB,KAAK4E,SAAS5J,QAAQykB,IAAiBC,GAAkB1f,KAAK0hB,kBAChF,CACA,SAAAP,GACE,MAAMX,EAAQxgB,KAAK4E,SAASpJ,aAAa,SACpCglB,IAGAxgB,KAAK4E,SAASpJ,aAAa,eAAkBwE,KAAK4E,SAAS+Z,YAAYhZ,QAC1E3F,KAAK4E,SAASxjB,aAAa,aAAco/B,GAE3CxgB,KAAK4E,SAASxjB,aAAa,yBAA0Bo/B,GACrDxgB,KAAK4E,SAASzjB,gBAAgB,SAChC,CACA,MAAAsgC,GACMzhB,KAAK2P,YAAc3P,KAAK6gB,WAC1B7gB,KAAK6gB,YAAa,GAGpB7gB,KAAK6gB,YAAa,EAClB7gB,KAAKijB,aAAY,KACXjjB,KAAK6gB,YACP7gB,KAAK6P,MACP,GACC7P,KAAK6E,QAAQ0b,MAAM1Q,MACxB,CACA,MAAA2R,GACMxhB,KAAK+hB,yBAGT/hB,KAAK6gB,YAAa,EAClB7gB,KAAKijB,aAAY,KACVjjB,KAAK6gB,YACR7gB,KAAK4P,MACP,GACC5P,KAAK6E,QAAQ0b,MAAM3Q,MACxB,CACA,WAAAqT,CAAYrlB,EAASslB,GACnBhW,aAAalN,KAAK4gB,UAClB5gB,KAAK4gB,SAAW/iB,WAAWD,EAASslB,EACtC,CACA,oBAAAnB,GACE,OAAO/kC,OAAOmiB,OAAOa,KAAK8gB,gBAAgB1f,UAAS,EACrD,CACA,UAAAyC,CAAWC,GACT,MAAMqf,EAAiBngB,GAAYG,kBAAkBnD,KAAK4E,UAC1D,IAAK,MAAMwe,KAAiBpmC,OAAO4D,KAAKuiC,GAClC9D,GAAsB1oB,IAAIysB,WACrBD,EAAeC,GAU1B,OAPAtf,EAAS,IACJqf,KACmB,iBAAXrf,GAAuBA,EAASA,EAAS,CAAC,GAEvDA,EAAS9D,KAAK+D,gBAAgBD,GAC9BA,EAAS9D,KAAKgE,kBAAkBF,GAChC9D,KAAKiE,iBAAiBH,GACfA,CACT,CACA,iBAAAE,CAAkBF,GAchB,OAbAA,EAAOuc,WAAiC,IAArBvc,EAAOuc,UAAsBh7B,SAAS6G,KAAOwO,GAAWoJ,EAAOuc,WACtD,iBAAjBvc,EAAOyc,QAChBzc,EAAOyc,MAAQ,CACb1Q,KAAM/L,EAAOyc,MACb3Q,KAAM9L,EAAOyc,QAGW,iBAAjBzc,EAAO0c,QAChB1c,EAAO0c,MAAQ1c,EAAO0c,MAAM3gC,YAEA,iBAAnBikB,EAAOsZ,UAChBtZ,EAAOsZ,QAAUtZ,EAAOsZ,QAAQv9B,YAE3BikB,CACT,CACA,kBAAA8e,GACE,MAAM9e,EAAS,CAAC,EAChB,IAAK,MAAOhnB,EAAKa,KAAUX,OAAOmkB,QAAQnB,KAAK6E,SACzC7E,KAAKmE,YAAYT,QAAQ5mB,KAASa,IACpCmmB,EAAOhnB,GAAOa,GASlB,OANAmmB,EAAO/J,UAAW,EAClB+J,EAAOlC,QAAU,SAKVkC,CACT,CACA,cAAA6d,GACM3hB,KAAKmS,UACPnS,KAAKmS,QAAQnZ,UACbgH,KAAKmS,QAAU,MAEbnS,KAAKihB,MACPjhB,KAAKihB,IAAItnB,SACTqG,KAAKihB,IAAM,KAEf,CAGA,sBAAOxkB,CAAgBqH,GACrB,OAAO9D,KAAKwH,MAAK,WACf,MAAMnd,EAAOq2B,GAAQpb,oBAAoBtF,KAAM8D,GAC/C,GAAsB,iBAAXA,EAAX,CAGA,QAA4B,IAAjBzZ,EAAKyZ,GACd,MAAM,IAAIU,UAAU,oBAAoBV,MAE1CzZ,EAAKyZ,IAJL,CAKF,GACF,EAOF3H,GAAmBukB,IAcnB,MACM2C,GAAiB,kBACjBC,GAAmB,gBACnBC,GAAY,IACb7C,GAAQhd,QACX0Z,QAAS,GACTp1B,OAAQ,CAAC,EAAG,GACZtJ,UAAW,QACX8+B,SAAU,8IACV5b,QAAS,SAEL4hB,GAAgB,IACjB9C,GAAQ/c,YACXyZ,QAAS,kCAOX,MAAMqG,WAAgB/C,GAEpB,kBAAWhd,GACT,OAAO6f,EACT,CACA,sBAAW5f,GACT,OAAO6f,EACT,CACA,eAAWjnB,GACT,MA7BW,SA8Bb,CAGA,cAAAqlB,GACE,OAAO5hB,KAAKgiB,aAAehiB,KAAK0jB,aAClC,CAGA,sBAAAxB,GACE,MAAO,CACL,CAACmB,IAAiBrjB,KAAKgiB,YACvB,CAACsB,IAAmBtjB,KAAK0jB,cAE7B,CACA,WAAAA,GACE,OAAO1jB,KAAK8d,yBAAyB9d,KAAK6E,QAAQuY,QACpD,CAGA,sBAAO3gB,CAAgBqH,GACrB,OAAO9D,KAAKwH,MAAK,WACf,MAAMnd,EAAOo5B,GAAQne,oBAAoBtF,KAAM8D,GAC/C,GAAsB,iBAAXA,EAAX,CAGA,QAA4B,IAAjBzZ,EAAKyZ,GACd,MAAM,IAAIU,UAAU,oBAAoBV,MAE1CzZ,EAAKyZ,IAJL,CAKF,GACF,EAOF3H,GAAmBsnB,IAcnB,MAEME,GAAc,gBAEdC,GAAiB,WAAWD,KAC5BE,GAAc,QAAQF,KACtBG,GAAwB,OAAOH,cAE/BI,GAAsB,SAEtBC,GAAwB,SAExBC,GAAqB,YAGrBC,GAAsB,GAAGD,mBAA+CA,uBAGxEE,GAAY,CAChBn8B,OAAQ,KAERo8B,WAAY,eACZC,cAAc,EACd93B,OAAQ,KACR+3B,UAAW,CAAC,GAAK,GAAK,IAElBC,GAAgB,CACpBv8B,OAAQ,gBAERo8B,WAAY,SACZC,aAAc,UACd93B,OAAQ,UACR+3B,UAAW,SAOb,MAAME,WAAkB9f,GACtB,WAAAP,CAAY5kB,EAASukB,GACnBa,MAAMplB,EAASukB,GAGf9D,KAAKykB,aAAe,IAAIvzB,IACxB8O,KAAK0kB,oBAAsB,IAAIxzB,IAC
/B8O,KAAK2kB,aAA6D,YAA9C1/B,iBAAiB+a,KAAK4E,UAAU5Y,UAA0B,KAAOgU,KAAK4E,SAC1F5E,KAAK4kB,cAAgB,KACrB5kB,KAAK6kB,UAAY,KACjB7kB,KAAK8kB,oBAAsB,CACzBC,gBAAiB,EACjBC,gBAAiB,GAEnBhlB,KAAKilB,SACP,CAGA,kBAAWvhB,GACT,OAAOygB,EACT,CACA,sBAAWxgB,GACT,OAAO4gB,EACT,CACA,eAAWhoB,GACT,MAhEW,WAiEb,CAGA,OAAA0oB,GACEjlB,KAAKklB,mCACLllB,KAAKmlB,2BACDnlB,KAAK6kB,UACP7kB,KAAK6kB,UAAUO,aAEfplB,KAAK6kB,UAAY7kB,KAAKqlB,kBAExB,IAAK,MAAMC,KAAWtlB,KAAK0kB,oBAAoBvlB,SAC7Ca,KAAK6kB,UAAUU,QAAQD,EAE3B,CACA,OAAAvgB,GACE/E,KAAK6kB,UAAUO,aACfzgB,MAAMI,SACR,CAGA,iBAAAf,CAAkBF,GAShB,OAPAA,EAAOvX,OAASmO,GAAWoJ,EAAOvX,SAAWlH,SAAS6G,KAGtD4X,EAAOsgB,WAAatgB,EAAO9b,OAAS,GAAG8b,EAAO9b,oBAAsB8b,EAAOsgB,WAC3C,iBAArBtgB,EAAOwgB,YAChBxgB,EAAOwgB,UAAYxgB,EAAOwgB,UAAUpiC,MAAM,KAAKY,KAAInF,GAAS4f,OAAOC,WAAW7f,MAEzEmmB,CACT,CACA,wBAAAqhB,GACOnlB,KAAK6E,QAAQwf,eAKlB9jB,GAAaC,IAAIR,KAAK6E,QAAQtY,OAAQs3B,IACtCtjB,GAAac,GAAGrB,KAAK6E,QAAQtY,OAAQs3B,GAAaG,IAAuB5kB,IACvE,MAAMomB,EAAoBxlB,KAAK0kB,oBAAoBvnC,IAAIiiB,EAAM7S,OAAOtB,MACpE,GAAIu6B,EAAmB,CACrBpmB,EAAMkD,iBACN,MAAM3G,EAAOqE,KAAK2kB,cAAgB/kC,OAC5BmE,EAASyhC,EAAkBnhC,UAAY2b,KAAK4E,SAASvgB,UAC3D,GAAIsX,EAAK8pB,SAKP,YAJA9pB,EAAK8pB,SAAS,CACZ9jC,IAAKoC,EACL2hC,SAAU,WAMd/pB,EAAKlQ,UAAY1H,CACnB,KAEJ,CACA,eAAAshC,GACE,MAAM5jC,EAAU,CACdka,KAAMqE,KAAK2kB,aACXL,UAAWtkB,KAAK6E,QAAQyf,UACxBF,WAAYpkB,KAAK6E,QAAQuf,YAE3B,OAAO,IAAIuB,sBAAqBxkB,GAAWnB,KAAK4lB,kBAAkBzkB,IAAU1f,EAC9E,CAGA,iBAAAmkC,CAAkBzkB,GAChB,MAAM0kB,EAAgBlI,GAAS3d,KAAKykB,aAAatnC,IAAI,IAAIwgC,EAAMpxB,OAAO4N,MAChEub,EAAWiI,IACf3d,KAAK8kB,oBAAoBC,gBAAkBpH,EAAMpxB,OAAOlI,UACxD2b,KAAK8lB,SAASD,EAAclI,GAAO,EAE/BqH,GAAmBhlB,KAAK2kB,cAAgBt/B,SAASC,iBAAiBmG,UAClEs6B,EAAkBf,GAAmBhlB,KAAK8kB,oBAAoBE,gBACpEhlB,KAAK8kB,oBAAoBE,gBAAkBA,EAC3C,IAAK,MAAMrH,KAASxc,EAAS,CAC3B,IAAKwc,EAAMqI,eAAgB,CACzBhmB,KAAK4kB,cAAgB,KACrB5kB,KAAKimB,kBAAkBJ,EAAclI,IACrC,QACF,CACA,MAAMuI,EAA2BvI,EAAMpxB,OAAOlI,WAAa2b,KAAK8kB,oBAAoBC,gBAEpF,GAAIgB,GAAmBG,GAGrB,GAFAxQ,EAASiI,IAEJqH,EACH,YAMCe,GAAoBG,GACvBxQ,EAASiI,EAEb,CACF,CACA,gCAAAuH,GACEllB,KAAKykB,aAAe,IAAIvzB,IACxB8O,KAAK0kB,oBAAsB,IAAIxzB,IAC/B,MAAMi1B,EAActgB,GAAe1T,KAAK6xB,GAAuBhkB,KAAK6E,QAAQtY,QAC5E,IAAK,MAAM65B,KAAUD,EAAa,CAEhC,IAAKC,EAAOn7B,MAAQiQ,GAAWkrB,GAC7B,SAEF,MAAMZ,EAAoB3f,GAAeC,QAAQugB,UAAUD,EAAOn7B,MAAO+U,KAAK4E,UAG1EjK,GAAU6qB,KACZxlB,KAAKykB,aAAa1yB,IAAIs0B,UAAUD,EAAOn7B,MAAOm7B,GAC9CpmB,KAAK0kB,oBAAoB3yB,IAAIq0B,EAAOn7B,KAAMu6B,GAE9C,CACF,CACA,QAAAM,CAASv5B,GACHyT,KAAK4kB,gBAAkBr4B,IAG3ByT,KAAKimB,kBAAkBjmB,KAAK6E,QAAQtY,QACpCyT,KAAK4kB,cAAgBr4B,EACrBA,EAAO8O,UAAU5E,IAAIstB,IACrB/jB,KAAKsmB,iBAAiB/5B,GACtBgU,GAAaqB,QAAQ5B,KAAK4E,SAAUgf,GAAgB,CAClD9jB,cAAevT,IAEnB,CACA,gBAAA+5B,CAAiB/5B,GAEf,GAAIA,EAAO8O,UAAU7W,SA9LQ,iBA+L3BqhB,GAAeC,QArLc,mBAqLsBvZ,EAAOyO,QAtLtC,cAsLkEK,UAAU5E,IAAIstB,SAGtG,IAAK,MAAMwC,KAAa1gB,GAAeI,QAAQ1Z,EA9LnB,qBAiM1B,IAAK,MAAMxJ,KAAQ8iB,GAAeM,KAAKogB,EAAWrC,IAChDnhC,EAAKsY,UAAU5E,IAAIstB,GAGzB,CACA,iBAAAkC,CAAkBxhC,GAChBA,EAAO4W,UAAU1B,OAAOoqB,IACxB,MAAMyC,EAAc3gB,GAAe1T,KAAK,GAAG6xB,MAAyBD,KAAuBt/B,GAC3F,IAAK,MAAM9E,KAAQ6mC,EACjB7mC,EAAK0b,UAAU1B,OAAOoqB,GAE1B,CAGA,sBAAOtnB,CAAgBqH,GACrB,OAAO9D,KAAKwH,MAAK,WACf,MAAMnd,EAAOm6B,GAAUlf,oBAAoBtF,KAAM8D,GACjD,GAAsB,iBAAXA,EAAX,CAGA,QAAqB/K,IAAjB1O,EAAKyZ,IAAyBA,EAAOrC,WAAW,MAAmB,gBAAXqC,EAC1D,MAAM,IAAIU,UAAU,oBAAoBV,MAE1CzZ,EAAKyZ,IAJL,CAKF,GACF,EAOFvD,GAAac,GAAGzhB,OAAQkkC,IAAuB,KAC7C,IAAK,MAAM2C,KAAO5gB,GAAe1T,KApOT,0BAqOtBqyB,GAAUlf,oBAAoBmhB,EAChC,IAOFtqB,GAAmBqoB,IAcnB,MAEMkC,GAAc,UACdC,GAAe,OAAOD,KACtBE,GAAiB,SAASF,KAC1BG,GAAe,OAAOH,KACtBI,GAAgB,QAAQJ,KACxBK,GAAuB,QAAQL,KAC/BM,GAAgB,UAAUN,KAC1BO,GAAsB,OAAOP,KAC7BQ,GAAiB,YA
CjBC,GAAkB,aAClBC,GAAe,UACfC,GAAiB,YACjBC,GAAW,OACXC,GAAU,MACVC,GAAoB,SACpBC,GAAoB,OACpBC,GAAoB,OAEpBC,GAA2B,mBAE3BC,GAA+B,QAAQD,MAIvCE,GAAuB,2EACvBC,GAAsB,YAFOF,uBAAiDA,mBAA6CA,OAE/EC,KAC5CE,GAA8B,IAAIP,8BAA6CA,+BAA8CA,4BAMnI,MAAMQ,WAAYtjB,GAChB,WAAAP,CAAY5kB,GACVolB,MAAMplB,GACNygB,KAAKoS,QAAUpS,KAAK4E,SAAS5J,QAdN,uCAelBgF,KAAKoS,UAOVpS,KAAKioB,sBAAsBjoB,KAAKoS,QAASpS,KAAKkoB,gBAC9C3nB,GAAac,GAAGrB,KAAK4E,SAAUoiB,IAAe5nB,GAASY,KAAK6M,SAASzN,KACvE,CAGA,eAAW7C,GACT,MAnDW,KAoDb,CAGA,IAAAsT,GAEE,MAAMsY,EAAYnoB,KAAK4E,SACvB,GAAI5E,KAAKooB,cAAcD,GACrB,OAIF,MAAME,EAASroB,KAAKsoB,iBACdC,EAAYF,EAAS9nB,GAAaqB,QAAQymB,EAAQ1B,GAAc,CACpE7mB,cAAeqoB,IACZ,KACa5nB,GAAaqB,QAAQumB,EAAWtB,GAAc,CAC9D/mB,cAAeuoB,IAEHrmB,kBAAoBumB,GAAaA,EAAUvmB,mBAGzDhC,KAAKwoB,YAAYH,EAAQF,GACzBnoB,KAAKyoB,UAAUN,EAAWE,GAC5B,CAGA,SAAAI,CAAUlpC,EAASmpC,GACZnpC,IAGLA,EAAQ8b,UAAU5E,IAAI+wB,IACtBxnB,KAAKyoB,UAAU5iB,GAAec,uBAAuBpnB,IAcrDygB,KAAKmF,gBAZY,KACsB,QAAjC5lB,EAAQic,aAAa,SAIzBjc,EAAQ4B,gBAAgB,YACxB5B,EAAQ6B,aAAa,iBAAiB,GACtC4e,KAAK2oB,gBAAgBppC,GAAS,GAC9BghB,GAAaqB,QAAQriB,EAASunC,GAAe,CAC3ChnB,cAAe4oB,KAPfnpC,EAAQ8b,UAAU5E,IAAIixB,GAQtB,GAE0BnoC,EAASA,EAAQ8b,UAAU7W,SAASijC,KACpE,CACA,WAAAe,CAAYjpC,EAASmpC,GACdnpC,IAGLA,EAAQ8b,UAAU1B,OAAO6tB,IACzBjoC,EAAQq7B,OACR5a,KAAKwoB,YAAY3iB,GAAec,uBAAuBpnB,IAcvDygB,KAAKmF,gBAZY,KACsB,QAAjC5lB,EAAQic,aAAa,SAIzBjc,EAAQ6B,aAAa,iBAAiB,GACtC7B,EAAQ6B,aAAa,WAAY,MACjC4e,KAAK2oB,gBAAgBppC,GAAS,GAC9BghB,GAAaqB,QAAQriB,EAASqnC,GAAgB,CAC5C9mB,cAAe4oB,KAPfnpC,EAAQ8b,UAAU1B,OAAO+tB,GAQzB,GAE0BnoC,EAASA,EAAQ8b,UAAU7W,SAASijC,KACpE,CACA,QAAA5a,CAASzN,GACP,IAAK,CAAC8nB,GAAgBC,GAAiBC,GAAcC,GAAgBC,GAAUC,IAASnmB,SAAShC,EAAMtiB,KACrG,OAEFsiB,EAAM0U,kBACN1U,EAAMkD,iBACN,MAAMyD,EAAW/F,KAAKkoB,eAAe/hC,QAAO5G,IAAY2b,GAAW3b,KACnE,IAAIqpC,EACJ,GAAI,CAACtB,GAAUC,IAASnmB,SAAShC,EAAMtiB,KACrC8rC,EAAoB7iB,EAAS3G,EAAMtiB,MAAQwqC,GAAW,EAAIvhB,EAASrV,OAAS,OACvE,CACL,MAAM8c,EAAS,CAAC2Z,GAAiBE,IAAgBjmB,SAAShC,EAAMtiB,KAChE8rC,EAAoB9qB,GAAqBiI,EAAU3G,EAAM7S,OAAQihB,GAAQ,EAC3E,CACIob,IACFA,EAAkBnW,MAAM,CACtBoW,eAAe,IAEjBb,GAAI1iB,oBAAoBsjB,GAAmB/Y,OAE/C,CACA,YAAAqY,GAEE,OAAOriB,GAAe1T,KAAK21B,GAAqB9nB,KAAKoS,QACvD,CACA,cAAAkW,GACE,OAAOtoB,KAAKkoB,eAAe/1B,MAAKzN,GAASsb,KAAKooB,cAAc1jC,MAAW,IACzE,CACA,qBAAAujC,CAAsBxjC,EAAQshB,GAC5B/F,KAAK8oB,yBAAyBrkC,EAAQ,OAAQ,WAC9C,IAAK,MAAMC,KAASqhB,EAClB/F,KAAK+oB,6BAA6BrkC,EAEtC,CACA,4BAAAqkC,CAA6BrkC,GAC3BA,EAAQsb,KAAKgpB,iBAAiBtkC,GAC9B,MAAMukC,EAAWjpB,KAAKooB,cAAc1jC,GAC9BwkC,EAAYlpB,KAAKmpB,iBAAiBzkC,GACxCA,EAAMtD,aAAa,gBAAiB6nC,GAChCC,IAAcxkC,GAChBsb,KAAK8oB,yBAAyBI,EAAW,OAAQ,gBAE9CD,GACHvkC,EAAMtD,aAAa,WAAY,MAEjC4e,KAAK8oB,yBAAyBpkC,EAAO,OAAQ,OAG7Csb,KAAKopB,mCAAmC1kC,EAC1C,CACA,kCAAA0kC,CAAmC1kC,GACjC,MAAM6H,EAASsZ,GAAec,uBAAuBjiB,GAChD6H,IAGLyT,KAAK8oB,yBAAyBv8B,EAAQ,OAAQ,YAC1C7H,EAAMyV,IACR6F,KAAK8oB,yBAAyBv8B,EAAQ,kBAAmB,GAAG7H,EAAMyV,MAEtE,CACA,eAAAwuB,CAAgBppC,EAAS8pC,GACvB,MAAMH,EAAYlpB,KAAKmpB,iBAAiB5pC,GACxC,IAAK2pC,EAAU7tB,UAAU7W,SApKN,YAqKjB,OAEF,MAAMmjB,EAAS,CAAC5N,EAAUoa,KACxB,MAAM50B,EAAUsmB,GAAeC,QAAQ/L,EAAUmvB,GAC7C3pC,GACFA,EAAQ8b,UAAUsM,OAAOwM,EAAWkV,EACtC,EAEF1hB,EAAOggB,GAA0BH,IACjC7f,EA5K2B,iBA4KI+f,IAC/BwB,EAAU9nC,aAAa,gBAAiBioC,EAC1C,CACA,wBAAAP,CAAyBvpC,EAASwC,EAAWpE,GACtC4B,EAAQgc,aAAaxZ,IACxBxC,EAAQ6B,aAAaW,EAAWpE,EAEpC,CACA,aAAAyqC,CAAc9Y,GACZ,OAAOA,EAAKjU,UAAU7W,SAASgjC,GACjC,CAGA,gBAAAwB,CAAiB1Z,GACf,OAAOA,EAAKtJ,QAAQ8hB,IAAuBxY,EAAOzJ,GAAeC,QAAQgiB,GAAqBxY,EAChG,CAGA,gBAAA6Z,CAAiB7Z,GACf,OAAOA,EAAKtU,QA5LO,gCA4LoBsU,CACzC,CAGA,sBAAO7S,CAAgBqH,GACrB,OAAO9D,KAAKwH,MAAK,WACf,MAAMnd,EAAO29B,GAAI1iB,oBAAoBtF,MACrC,GAAsB,iBAAX8D,EAAX,CAGA,QAAqB/K,IAAjB1
O,EAAKyZ,IAAyBA,EAAOrC,WAAW,MAAmB,gBAAXqC,EAC1D,MAAM,IAAIU,UAAU,oBAAoBV,MAE1CzZ,EAAKyZ,IAJL,CAKF,GACF,EAOFvD,GAAac,GAAGhc,SAAU0hC,GAAsBc,IAAsB,SAAUzoB,GAC1E,CAAC,IAAK,QAAQgC,SAASpB,KAAKiH,UAC9B7H,EAAMkD,iBAEJpH,GAAW8E,OAGfgoB,GAAI1iB,oBAAoBtF,MAAM6P,MAChC,IAKAtP,GAAac,GAAGzhB,OAAQqnC,IAAqB,KAC3C,IAAK,MAAM1nC,KAAWsmB,GAAe1T,KAAK41B,IACxCC,GAAI1iB,oBAAoB/lB,EAC1B,IAMF4c,GAAmB6rB,IAcnB,MAEMhjB,GAAY,YACZskB,GAAkB,YAAYtkB,KAC9BukB,GAAiB,WAAWvkB,KAC5BwkB,GAAgB,UAAUxkB,KAC1BykB,GAAiB,WAAWzkB,KAC5B0kB,GAAa,OAAO1kB,KACpB2kB,GAAe,SAAS3kB,KACxB4kB,GAAa,OAAO5kB,KACpB6kB,GAAc,QAAQ7kB,KAEtB8kB,GAAkB,OAClBC,GAAkB,OAClBC,GAAqB,UACrBrmB,GAAc,CAClByc,UAAW,UACX6J,SAAU,UACV1J,MAAO,UAEH7c,GAAU,CACd0c,WAAW,EACX6J,UAAU,EACV1J,MAAO,KAOT,MAAM2J,WAAcxlB,GAClB,WAAAP,CAAY5kB,EAASukB,GACnBa,MAAMplB,EAASukB,GACf9D,KAAK4gB,SAAW,KAChB5gB,KAAKmqB,sBAAuB,EAC5BnqB,KAAKoqB,yBAA0B,EAC/BpqB,KAAKkhB,eACP,CAGA,kBAAWxd,GACT,OAAOA,EACT,CACA,sBAAWC,GACT,OAAOA,EACT,CACA,eAAWpH,GACT,MA/CS,OAgDX,CAGA,IAAAsT,GACoBtP,GAAaqB,QAAQ5B,KAAK4E,SAAUglB,IACxC5nB,mBAGdhC,KAAKqqB,gBACDrqB,KAAK6E,QAAQub,WACfpgB,KAAK4E,SAASvJ,UAAU5E,IA/CN,QAsDpBuJ,KAAK4E,SAASvJ,UAAU1B,OAAOmwB,IAC/BjuB,GAAOmE,KAAK4E,UACZ5E,KAAK4E,SAASvJ,UAAU5E,IAAIszB,GAAiBC,IAC7ChqB,KAAKmF,gBARY,KACfnF,KAAK4E,SAASvJ,UAAU1B,OAAOqwB,IAC/BzpB,GAAaqB,QAAQ5B,KAAK4E,SAAUilB,IACpC7pB,KAAKsqB,oBAAoB,GAKGtqB,KAAK4E,SAAU5E,KAAK6E,QAAQub,WAC5D,CACA,IAAAxQ,GACO5P,KAAKuqB,YAGQhqB,GAAaqB,QAAQ5B,KAAK4E,SAAU8kB,IACxC1nB,mBAQdhC,KAAK4E,SAASvJ,UAAU5E,IAAIuzB,IAC5BhqB,KAAKmF,gBANY,KACfnF,KAAK4E,SAASvJ,UAAU5E,IAAIqzB,IAC5B9pB,KAAK4E,SAASvJ,UAAU1B,OAAOqwB,GAAoBD,IACnDxpB,GAAaqB,QAAQ5B,KAAK4E,SAAU+kB,GAAa,GAGrB3pB,KAAK4E,SAAU5E,KAAK6E,QAAQub,YAC5D,CACA,OAAArb,GACE/E,KAAKqqB,gBACDrqB,KAAKuqB,WACPvqB,KAAK4E,SAASvJ,UAAU1B,OAAOowB,IAEjCplB,MAAMI,SACR,CACA,OAAAwlB,GACE,OAAOvqB,KAAK4E,SAASvJ,UAAU7W,SAASulC,GAC1C,CAIA,kBAAAO,GACOtqB,KAAK6E,QAAQolB,WAGdjqB,KAAKmqB,sBAAwBnqB,KAAKoqB,0BAGtCpqB,KAAK4gB,SAAW/iB,YAAW,KACzBmC,KAAK4P,MAAM,GACV5P,KAAK6E,QAAQ0b,QAClB,CACA,cAAAiK,CAAeprB,EAAOqrB,GACpB,OAAQrrB,EAAMqB,MACZ,IAAK,YACL,IAAK,WAEDT,KAAKmqB,qBAAuBM,EAC5B,MAEJ,IAAK,UACL,IAAK,WAEDzqB,KAAKoqB,wBAA0BK,EAIrC,GAAIA,EAEF,YADAzqB,KAAKqqB,gBAGP,MAAM5c,EAAcrO,EAAMU,cACtBE,KAAK4E,WAAa6I,GAAezN,KAAK4E,SAASpgB,SAASipB,IAG5DzN,KAAKsqB,oBACP,CACA,aAAApJ,GACE3gB,GAAac,GAAGrB,KAAK4E,SAAU0kB,IAAiBlqB,GAASY,KAAKwqB,eAAeprB,GAAO,KACpFmB,GAAac,GAAGrB,KAAK4E,SAAU2kB,IAAgBnqB,GAASY,KAAKwqB,eAAeprB,GAAO,KACnFmB,GAAac,GAAGrB,KAAK4E,SAAU4kB,IAAepqB,GAASY,KAAKwqB,eAAeprB,GAAO,KAClFmB,GAAac,GAAGrB,KAAK4E,SAAU6kB,IAAgBrqB,GAASY,KAAKwqB,eAAeprB,GAAO,IACrF,CACA,aAAAirB,GACEnd,aAAalN,KAAK4gB,UAClB5gB,KAAK4gB,SAAW,IAClB,CAGA,sBAAOnkB,CAAgBqH,GACrB,OAAO9D,KAAKwH,MAAK,WACf,MAAMnd,EAAO6/B,GAAM5kB,oBAAoBtF,KAAM8D,GAC7C,GAAsB,iBAAXA,EAAqB,CAC9B,QAA4B,IAAjBzZ,EAAKyZ,GACd,MAAM,IAAIU,UAAU,oBAAoBV,MAE1CzZ,EAAKyZ,GAAQ9D,KACf,CACF,GACF,ECr0IK,SAAS0qB,GAAcruB,GACD,WAAvBhX,SAASuX,WAAyBP,IACjChX,SAASyF,iBAAiB,mBAAoBuR,EACrD,CDy0IAwK,GAAqBqjB,IAMrB/tB,GAAmB+tB,IEpyInBQ,IAzCA,WAC2B,GAAGt4B,MAAM5U,KAChC6H,SAAS+a,iBAAiB,+BAETtd,KAAI,SAAU6nC,GAC/B,OAAO,IAAI,GAAkBA,EAAkB,CAC7CpK,MAAO,CAAE1Q,KAAM,IAAKD,KAAM,MAE9B,GACF,IAiCA8a,IA5BA,WACYrlC,SAASm9B,eAAe,mBAC9B13B,iBAAiB,SAAS,WAC5BzF,SAAS6G,KAAKT,UAAY,EAC1BpG,SAASC,gBAAgBmG,UAAY,CACvC,GACF,IAuBAi/B,IArBA,WACE,IAAIE,EAAMvlC,SAASm9B,eAAe,mBAC9BqI,EAASxlC,SACVylC,uBAAuB,aAAa,GACpCxnC,wBACH1D,OAAOkL,iBAAiB,UAAU,WAC5BkV,KAAK+qB,UAAY/qB,KAAKgrB,SAAWhrB,KAAKgrB,QAAUH,EAAOjtC,OACzDgtC,EAAI7pC,MAAMgxB,QAAU,QAEpB6Y,EAAI7pC,MAAMgxB,QAAU,OAEtB/R,KAAK+qB,UAAY/qB,KAAKgrB,OACxB,GACF,IAUAprC,OAAOqrC,UAAY","sources":["webpack://pydata
_sphinx_theme/webpack/bootstrap","webpack://pydata_sphinx_theme/webpack/runtime/define property getters","webpack://pydata_sphinx_theme/webpack/runtime/hasOwnProperty shorthand","webpack://pydata_sphinx_theme/webpack/runtime/make namespace object","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/enums.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/getNodeName.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/getWindow.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/instanceOf.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/modifiers/applyStyles.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/utils/getBasePlacement.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/utils/math.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/utils/userAgent.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/isLayoutViewport.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/getBoundingClientRect.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/getLayoutRect.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/contains.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/getComputedStyle.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/isTableElement.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/getDocumentElement.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/getParentNode.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/getOffsetParent.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/utils/getMainAxisFromPlacement.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/utils/within.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/utils/mergePaddingObject.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/utils/getFreshSideObject.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/utils/expandToHashMap.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/modifiers/arrow.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/utils/getVariation.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/modifiers/computeStyles.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/modifiers/eventListeners.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/utils/getOppositePlacement.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/utils/getOppositeVariationPlacement.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/getWindowScroll.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/getWindowScrollBarX.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/isScrollParent.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/getScrollParent.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/listScrollParents.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/utils/rectToClientRect.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/getClippingRect.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-ut
ils/getViewportRect.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/getDocumentRect.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/utils/computeOffsets.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/utils/detectOverflow.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/modifiers/flip.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/utils/computeAutoPlacement.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/modifiers/hide.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/modifiers/offset.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/modifiers/popperOffsets.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/modifiers/preventOverflow.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/utils/getAltAxis.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/getCompositeRect.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/getNodeScroll.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/getHTMLElementScroll.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/utils/orderModifiers.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/createPopper.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/utils/debounce.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/utils/mergeByName.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/popper.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/popper-lite.js","webpack://pydata_sphinx_theme/./node_modules/bootstrap/dist/js/bootstrap.esm.js","webpack://pydata_sphinx_theme/./src/pydata_sphinx_theme/assets/scripts/mixin.js","webpack://pydata_sphinx_theme/./src/pydata_sphinx_theme/assets/scripts/bootstrap.js"],"sourcesContent":["// The require scope\nvar __webpack_require__ = {};\n\n","// define getter functions for harmony exports\n__webpack_require__.d = (exports, definition) => {\n\tfor(var key in definition) {\n\t\tif(__webpack_require__.o(definition, key) && !__webpack_require__.o(exports, key)) {\n\t\t\tObject.defineProperty(exports, key, { enumerable: true, get: definition[key] });\n\t\t}\n\t}\n};","__webpack_require__.o = (obj, prop) => (Object.prototype.hasOwnProperty.call(obj, prop))","// define __esModule on exports\n__webpack_require__.r = (exports) => {\n\tif(typeof Symbol !== 'undefined' && Symbol.toStringTag) {\n\t\tObject.defineProperty(exports, Symbol.toStringTag, { value: 'Module' });\n\t}\n\tObject.defineProperty(exports, '__esModule', { value: true });\n};","export var top = 'top';\nexport var bottom = 'bottom';\nexport var right = 'right';\nexport var left = 'left';\nexport var auto = 'auto';\nexport var basePlacements = [top, bottom, right, left];\nexport var start = 'start';\nexport var end = 'end';\nexport var clippingParents = 'clippingParents';\nexport var viewport = 'viewport';\nexport var popper = 'popper';\nexport var reference = 'reference';\nexport var variationPlacements = /*#__PURE__*/basePlacements.reduce(function (acc, placement) {\n return acc.concat([placement + \"-\" + start, placement + \"-\" + end]);\n}, []);\nexport var placements = /*#__PURE__*/[].concat(basePlacements, [auto]).reduce(function (acc, placement) {\n return acc.concat([placement, placement + \"-\" + start, placement + \"-\" + end]);\n}, []); // modifiers that need 
to read the DOM\n\nexport var beforeRead = 'beforeRead';\nexport var read = 'read';\nexport var afterRead = 'afterRead'; // pure-logic modifiers\n\nexport var beforeMain = 'beforeMain';\nexport var main = 'main';\nexport var afterMain = 'afterMain'; // modifier with the purpose to write to the DOM (or write into a framework state)\n\nexport var beforeWrite = 'beforeWrite';\nexport var write = 'write';\nexport var afterWrite = 'afterWrite';\nexport var modifierPhases = [beforeRead, read, afterRead, beforeMain, main, afterMain, beforeWrite, write, afterWrite];","export default function getNodeName(element) {\n return element ? (element.nodeName || '').toLowerCase() : null;\n}","export default function getWindow(node) {\n if (node == null) {\n return window;\n }\n\n if (node.toString() !== '[object Window]') {\n var ownerDocument = node.ownerDocument;\n return ownerDocument ? ownerDocument.defaultView || window : window;\n }\n\n return node;\n}","import getWindow from \"./getWindow.js\";\n\nfunction isElement(node) {\n var OwnElement = getWindow(node).Element;\n return node instanceof OwnElement || node instanceof Element;\n}\n\nfunction isHTMLElement(node) {\n var OwnElement = getWindow(node).HTMLElement;\n return node instanceof OwnElement || node instanceof HTMLElement;\n}\n\nfunction isShadowRoot(node) {\n // IE 11 has no ShadowRoot\n if (typeof ShadowRoot === 'undefined') {\n return false;\n }\n\n var OwnElement = getWindow(node).ShadowRoot;\n return node instanceof OwnElement || node instanceof ShadowRoot;\n}\n\nexport { isElement, isHTMLElement, isShadowRoot };","import getNodeName from \"../dom-utils/getNodeName.js\";\nimport { isHTMLElement } from \"../dom-utils/instanceOf.js\"; // This modifier takes the styles prepared by the `computeStyles` modifier\n// and applies them to the HTMLElements such as popper and arrow\n\nfunction applyStyles(_ref) {\n var state = _ref.state;\n Object.keys(state.elements).forEach(function (name) {\n var style = state.styles[name] || {};\n var attributes = state.attributes[name] || {};\n var element = state.elements[name]; // arrow is optional + virtual elements\n\n if (!isHTMLElement(element) || !getNodeName(element)) {\n return;\n } // Flow doesn't support to extend this property, but it's the most\n // effective way to apply styles to an HTMLElement\n // $FlowFixMe[cannot-write]\n\n\n Object.assign(element.style, style);\n Object.keys(attributes).forEach(function (name) {\n var value = attributes[name];\n\n if (value === false) {\n element.removeAttribute(name);\n } else {\n element.setAttribute(name, value === true ? '' : value);\n }\n });\n });\n}\n\nfunction effect(_ref2) {\n var state = _ref2.state;\n var initialStyles = {\n popper: {\n position: state.options.strategy,\n left: '0',\n top: '0',\n margin: '0'\n },\n arrow: {\n position: 'absolute'\n },\n reference: {}\n };\n Object.assign(state.elements.popper.style, initialStyles.popper);\n state.styles = initialStyles;\n\n if (state.elements.arrow) {\n Object.assign(state.elements.arrow.style, initialStyles.arrow);\n }\n\n return function () {\n Object.keys(state.elements).forEach(function (name) {\n var element = state.elements[name];\n var attributes = state.attributes[name] || {};\n var styleProperties = Object.keys(state.styles.hasOwnProperty(name) ? 
state.styles[name] : initialStyles[name]); // Set all values to an empty string to unset them\n\n var style = styleProperties.reduce(function (style, property) {\n style[property] = '';\n return style;\n }, {}); // arrow is optional + virtual elements\n\n if (!isHTMLElement(element) || !getNodeName(element)) {\n return;\n }\n\n Object.assign(element.style, style);\n Object.keys(attributes).forEach(function (attribute) {\n element.removeAttribute(attribute);\n });\n });\n };\n} // eslint-disable-next-line import/no-unused-modules\n\n\nexport default {\n name: 'applyStyles',\n enabled: true,\n phase: 'write',\n fn: applyStyles,\n effect: effect,\n requires: ['computeStyles']\n};","import { auto } from \"../enums.js\";\nexport default function getBasePlacement(placement) {\n return placement.split('-')[0];\n}","export var max = Math.max;\nexport var min = Math.min;\nexport var round = Math.round;","export default function getUAString() {\n var uaData = navigator.userAgentData;\n\n if (uaData != null && uaData.brands && Array.isArray(uaData.brands)) {\n return uaData.brands.map(function (item) {\n return item.brand + \"/\" + item.version;\n }).join(' ');\n }\n\n return navigator.userAgent;\n}","import getUAString from \"../utils/userAgent.js\";\nexport default function isLayoutViewport() {\n return !/^((?!chrome|android).)*safari/i.test(getUAString());\n}","import { isElement, isHTMLElement } from \"./instanceOf.js\";\nimport { round } from \"../utils/math.js\";\nimport getWindow from \"./getWindow.js\";\nimport isLayoutViewport from \"./isLayoutViewport.js\";\nexport default function getBoundingClientRect(element, includeScale, isFixedStrategy) {\n if (includeScale === void 0) {\n includeScale = false;\n }\n\n if (isFixedStrategy === void 0) {\n isFixedStrategy = false;\n }\n\n var clientRect = element.getBoundingClientRect();\n var scaleX = 1;\n var scaleY = 1;\n\n if (includeScale && isHTMLElement(element)) {\n scaleX = element.offsetWidth > 0 ? round(clientRect.width) / element.offsetWidth || 1 : 1;\n scaleY = element.offsetHeight > 0 ? round(clientRect.height) / element.offsetHeight || 1 : 1;\n }\n\n var _ref = isElement(element) ? getWindow(element) : window,\n visualViewport = _ref.visualViewport;\n\n var addVisualOffsets = !isLayoutViewport() && isFixedStrategy;\n var x = (clientRect.left + (addVisualOffsets && visualViewport ? visualViewport.offsetLeft : 0)) / scaleX;\n var y = (clientRect.top + (addVisualOffsets && visualViewport ? visualViewport.offsetTop : 0)) / scaleY;\n var width = clientRect.width / scaleX;\n var height = clientRect.height / scaleY;\n return {\n width: width,\n height: height,\n top: y,\n right: x + width,\n bottom: y + height,\n left: x,\n x: x,\n y: y\n };\n}","import getBoundingClientRect from \"./getBoundingClientRect.js\"; // Returns the layout rect of an element relative to its offsetParent. 
Layout\n// means it doesn't take into account transforms.\n\nexport default function getLayoutRect(element) {\n var clientRect = getBoundingClientRect(element); // Use the clientRect sizes if it's not been transformed.\n // Fixes https://github.com/popperjs/popper-core/issues/1223\n\n var width = element.offsetWidth;\n var height = element.offsetHeight;\n\n if (Math.abs(clientRect.width - width) <= 1) {\n width = clientRect.width;\n }\n\n if (Math.abs(clientRect.height - height) <= 1) {\n height = clientRect.height;\n }\n\n return {\n x: element.offsetLeft,\n y: element.offsetTop,\n width: width,\n height: height\n };\n}","import { isShadowRoot } from \"./instanceOf.js\";\nexport default function contains(parent, child) {\n var rootNode = child.getRootNode && child.getRootNode(); // First, attempt with faster native method\n\n if (parent.contains(child)) {\n return true;\n } // then fallback to custom implementation with Shadow DOM support\n else if (rootNode && isShadowRoot(rootNode)) {\n var next = child;\n\n do {\n if (next && parent.isSameNode(next)) {\n return true;\n } // $FlowFixMe[prop-missing]: need a better way to handle this...\n\n\n next = next.parentNode || next.host;\n } while (next);\n } // Give up, the result is false\n\n\n return false;\n}","import getWindow from \"./getWindow.js\";\nexport default function getComputedStyle(element) {\n return getWindow(element).getComputedStyle(element);\n}","import getNodeName from \"./getNodeName.js\";\nexport default function isTableElement(element) {\n return ['table', 'td', 'th'].indexOf(getNodeName(element)) >= 0;\n}","import { isElement } from \"./instanceOf.js\";\nexport default function getDocumentElement(element) {\n // $FlowFixMe[incompatible-return]: assume body is always available\n return ((isElement(element) ? element.ownerDocument : // $FlowFixMe[prop-missing]\n element.document) || window.document).documentElement;\n}","import getNodeName from \"./getNodeName.js\";\nimport getDocumentElement from \"./getDocumentElement.js\";\nimport { isShadowRoot } from \"./instanceOf.js\";\nexport default function getParentNode(element) {\n if (getNodeName(element) === 'html') {\n return element;\n }\n\n return (// this is a quicker (but less type safe) way to save quite some bytes from the bundle\n // $FlowFixMe[incompatible-return]\n // $FlowFixMe[prop-missing]\n element.assignedSlot || // step into the shadow DOM of the parent of a slotted node\n element.parentNode || ( // DOM Element detected\n isShadowRoot(element) ? 
element.host : null) || // ShadowRoot detected\n // $FlowFixMe[incompatible-call]: HTMLElement is a Node\n getDocumentElement(element) // fallback\n\n );\n}","import getWindow from \"./getWindow.js\";\nimport getNodeName from \"./getNodeName.js\";\nimport getComputedStyle from \"./getComputedStyle.js\";\nimport { isHTMLElement, isShadowRoot } from \"./instanceOf.js\";\nimport isTableElement from \"./isTableElement.js\";\nimport getParentNode from \"./getParentNode.js\";\nimport getUAString from \"../utils/userAgent.js\";\n\nfunction getTrueOffsetParent(element) {\n if (!isHTMLElement(element) || // https://github.com/popperjs/popper-core/issues/837\n getComputedStyle(element).position === 'fixed') {\n return null;\n }\n\n return element.offsetParent;\n} // `.offsetParent` reports `null` for fixed elements, while absolute elements\n// return the containing block\n\n\nfunction getContainingBlock(element) {\n var isFirefox = /firefox/i.test(getUAString());\n var isIE = /Trident/i.test(getUAString());\n\n if (isIE && isHTMLElement(element)) {\n // In IE 9, 10 and 11 fixed elements containing block is always established by the viewport\n var elementCss = getComputedStyle(element);\n\n if (elementCss.position === 'fixed') {\n return null;\n }\n }\n\n var currentNode = getParentNode(element);\n\n if (isShadowRoot(currentNode)) {\n currentNode = currentNode.host;\n }\n\n while (isHTMLElement(currentNode) && ['html', 'body'].indexOf(getNodeName(currentNode)) < 0) {\n var css = getComputedStyle(currentNode); // This is non-exhaustive but covers the most common CSS properties that\n // create a containing block.\n // https://developer.mozilla.org/en-US/docs/Web/CSS/Containing_block#identifying_the_containing_block\n\n if (css.transform !== 'none' || css.perspective !== 'none' || css.contain === 'paint' || ['transform', 'perspective'].indexOf(css.willChange) !== -1 || isFirefox && css.willChange === 'filter' || isFirefox && css.filter && css.filter !== 'none') {\n return currentNode;\n } else {\n currentNode = currentNode.parentNode;\n }\n }\n\n return null;\n} // Gets the closest ancestor positioned element. Handles some edge cases,\n// such as table ancestors and cross browser bugs.\n\n\nexport default function getOffsetParent(element) {\n var window = getWindow(element);\n var offsetParent = getTrueOffsetParent(element);\n\n while (offsetParent && isTableElement(offsetParent) && getComputedStyle(offsetParent).position === 'static') {\n offsetParent = getTrueOffsetParent(offsetParent);\n }\n\n if (offsetParent && (getNodeName(offsetParent) === 'html' || getNodeName(offsetParent) === 'body' && getComputedStyle(offsetParent).position === 'static')) {\n return window;\n }\n\n return offsetParent || getContainingBlock(element) || window;\n}","export default function getMainAxisFromPlacement(placement) {\n return ['top', 'bottom'].indexOf(placement) >= 0 ? 'x' : 'y';\n}","import { max as mathMax, min as mathMin } from \"./math.js\";\nexport function within(min, value, max) {\n return mathMax(min, mathMin(value, max));\n}\nexport function withinMaxClamp(min, value, max) {\n var v = within(min, value, max);\n return v > max ? 
max : v;\n}","import getFreshSideObject from \"./getFreshSideObject.js\";\nexport default function mergePaddingObject(paddingObject) {\n return Object.assign({}, getFreshSideObject(), paddingObject);\n}","export default function getFreshSideObject() {\n return {\n top: 0,\n right: 0,\n bottom: 0,\n left: 0\n };\n}","export default function expandToHashMap(value, keys) {\n return keys.reduce(function (hashMap, key) {\n hashMap[key] = value;\n return hashMap;\n }, {});\n}","import getBasePlacement from \"../utils/getBasePlacement.js\";\nimport getLayoutRect from \"../dom-utils/getLayoutRect.js\";\nimport contains from \"../dom-utils/contains.js\";\nimport getOffsetParent from \"../dom-utils/getOffsetParent.js\";\nimport getMainAxisFromPlacement from \"../utils/getMainAxisFromPlacement.js\";\nimport { within } from \"../utils/within.js\";\nimport mergePaddingObject from \"../utils/mergePaddingObject.js\";\nimport expandToHashMap from \"../utils/expandToHashMap.js\";\nimport { left, right, basePlacements, top, bottom } from \"../enums.js\"; // eslint-disable-next-line import/no-unused-modules\n\nvar toPaddingObject = function toPaddingObject(padding, state) {\n padding = typeof padding === 'function' ? padding(Object.assign({}, state.rects, {\n placement: state.placement\n })) : padding;\n return mergePaddingObject(typeof padding !== 'number' ? padding : expandToHashMap(padding, basePlacements));\n};\n\nfunction arrow(_ref) {\n var _state$modifiersData$;\n\n var state = _ref.state,\n name = _ref.name,\n options = _ref.options;\n var arrowElement = state.elements.arrow;\n var popperOffsets = state.modifiersData.popperOffsets;\n var basePlacement = getBasePlacement(state.placement);\n var axis = getMainAxisFromPlacement(basePlacement);\n var isVertical = [left, right].indexOf(basePlacement) >= 0;\n var len = isVertical ? 'height' : 'width';\n\n if (!arrowElement || !popperOffsets) {\n return;\n }\n\n var paddingObject = toPaddingObject(options.padding, state);\n var arrowRect = getLayoutRect(arrowElement);\n var minProp = axis === 'y' ? top : left;\n var maxProp = axis === 'y' ? bottom : right;\n var endDiff = state.rects.reference[len] + state.rects.reference[axis] - popperOffsets[axis] - state.rects.popper[len];\n var startDiff = popperOffsets[axis] - state.rects.reference[axis];\n var arrowOffsetParent = getOffsetParent(arrowElement);\n var clientSize = arrowOffsetParent ? axis === 'y' ? arrowOffsetParent.clientHeight || 0 : arrowOffsetParent.clientWidth || 0 : 0;\n var centerToReference = endDiff / 2 - startDiff / 2; // Make sure the arrow doesn't overflow the popper if the center point is\n // outside of the popper bounds\n\n var min = paddingObject[minProp];\n var max = clientSize - arrowRect[len] - paddingObject[maxProp];\n var center = clientSize / 2 - arrowRect[len] / 2 + centerToReference;\n var offset = within(min, center, max); // Prevents breaking syntax highlighting...\n\n var axisProp = axis;\n state.modifiersData[name] = (_state$modifiersData$ = {}, _state$modifiersData$[axisProp] = offset, _state$modifiersData$.centerOffset = offset - center, _state$modifiersData$);\n}\n\nfunction effect(_ref2) {\n var state = _ref2.state,\n options = _ref2.options;\n var _options$element = options.element,\n arrowElement = _options$element === void 0 ? 
'[data-popper-arrow]' : _options$element;\n\n if (arrowElement == null) {\n return;\n } // CSS selector\n\n\n if (typeof arrowElement === 'string') {\n arrowElement = state.elements.popper.querySelector(arrowElement);\n\n if (!arrowElement) {\n return;\n }\n }\n\n if (!contains(state.elements.popper, arrowElement)) {\n return;\n }\n\n state.elements.arrow = arrowElement;\n} // eslint-disable-next-line import/no-unused-modules\n\n\nexport default {\n name: 'arrow',\n enabled: true,\n phase: 'main',\n fn: arrow,\n effect: effect,\n requires: ['popperOffsets'],\n requiresIfExists: ['preventOverflow']\n};","export default function getVariation(placement) {\n return placement.split('-')[1];\n}","import { top, left, right, bottom, end } from \"../enums.js\";\nimport getOffsetParent from \"../dom-utils/getOffsetParent.js\";\nimport getWindow from \"../dom-utils/getWindow.js\";\nimport getDocumentElement from \"../dom-utils/getDocumentElement.js\";\nimport getComputedStyle from \"../dom-utils/getComputedStyle.js\";\nimport getBasePlacement from \"../utils/getBasePlacement.js\";\nimport getVariation from \"../utils/getVariation.js\";\nimport { round } from \"../utils/math.js\"; // eslint-disable-next-line import/no-unused-modules\n\nvar unsetSides = {\n top: 'auto',\n right: 'auto',\n bottom: 'auto',\n left: 'auto'\n}; // Round the offsets to the nearest suitable subpixel based on the DPR.\n// Zooming can change the DPR, but it seems to report a value that will\n// cleanly divide the values into the appropriate subpixels.\n\nfunction roundOffsetsByDPR(_ref, win) {\n var x = _ref.x,\n y = _ref.y;\n var dpr = win.devicePixelRatio || 1;\n return {\n x: round(x * dpr) / dpr || 0,\n y: round(y * dpr) / dpr || 0\n };\n}\n\nexport function mapToStyles(_ref2) {\n var _Object$assign2;\n\n var popper = _ref2.popper,\n popperRect = _ref2.popperRect,\n placement = _ref2.placement,\n variation = _ref2.variation,\n offsets = _ref2.offsets,\n position = _ref2.position,\n gpuAcceleration = _ref2.gpuAcceleration,\n adaptive = _ref2.adaptive,\n roundOffsets = _ref2.roundOffsets,\n isFixed = _ref2.isFixed;\n var _offsets$x = offsets.x,\n x = _offsets$x === void 0 ? 0 : _offsets$x,\n _offsets$y = offsets.y,\n y = _offsets$y === void 0 ? 0 : _offsets$y;\n\n var _ref3 = typeof roundOffsets === 'function' ? roundOffsets({\n x: x,\n y: y\n }) : {\n x: x,\n y: y\n };\n\n x = _ref3.x;\n y = _ref3.y;\n var hasX = offsets.hasOwnProperty('x');\n var hasY = offsets.hasOwnProperty('y');\n var sideX = left;\n var sideY = top;\n var win = window;\n\n if (adaptive) {\n var offsetParent = getOffsetParent(popper);\n var heightProp = 'clientHeight';\n var widthProp = 'clientWidth';\n\n if (offsetParent === getWindow(popper)) {\n offsetParent = getDocumentElement(popper);\n\n if (getComputedStyle(offsetParent).position !== 'static' && position === 'absolute') {\n heightProp = 'scrollHeight';\n widthProp = 'scrollWidth';\n }\n } // $FlowFixMe[incompatible-cast]: force type refinement, we compare offsetParent with window above, but Flow doesn't detect it\n\n\n offsetParent = offsetParent;\n\n if (placement === top || (placement === left || placement === right) && variation === end) {\n sideY = bottom;\n var offsetY = isFixed && offsetParent === win && win.visualViewport ? win.visualViewport.height : // $FlowFixMe[prop-missing]\n offsetParent[heightProp];\n y -= offsetY - popperRect.height;\n y *= gpuAcceleration ? 
1 : -1;\n }\n\n if (placement === left || (placement === top || placement === bottom) && variation === end) {\n sideX = right;\n var offsetX = isFixed && offsetParent === win && win.visualViewport ? win.visualViewport.width : // $FlowFixMe[prop-missing]\n offsetParent[widthProp];\n x -= offsetX - popperRect.width;\n x *= gpuAcceleration ? 1 : -1;\n }\n }\n\n var commonStyles = Object.assign({\n position: position\n }, adaptive && unsetSides);\n\n var _ref4 = roundOffsets === true ? roundOffsetsByDPR({\n x: x,\n y: y\n }, getWindow(popper)) : {\n x: x,\n y: y\n };\n\n x = _ref4.x;\n y = _ref4.y;\n\n if (gpuAcceleration) {\n var _Object$assign;\n\n return Object.assign({}, commonStyles, (_Object$assign = {}, _Object$assign[sideY] = hasY ? '0' : '', _Object$assign[sideX] = hasX ? '0' : '', _Object$assign.transform = (win.devicePixelRatio || 1) <= 1 ? \"translate(\" + x + \"px, \" + y + \"px)\" : \"translate3d(\" + x + \"px, \" + y + \"px, 0)\", _Object$assign));\n }\n\n return Object.assign({}, commonStyles, (_Object$assign2 = {}, _Object$assign2[sideY] = hasY ? y + \"px\" : '', _Object$assign2[sideX] = hasX ? x + \"px\" : '', _Object$assign2.transform = '', _Object$assign2));\n}\n\nfunction computeStyles(_ref5) {\n var state = _ref5.state,\n options = _ref5.options;\n var _options$gpuAccelerat = options.gpuAcceleration,\n gpuAcceleration = _options$gpuAccelerat === void 0 ? true : _options$gpuAccelerat,\n _options$adaptive = options.adaptive,\n adaptive = _options$adaptive === void 0 ? true : _options$adaptive,\n _options$roundOffsets = options.roundOffsets,\n roundOffsets = _options$roundOffsets === void 0 ? true : _options$roundOffsets;\n var commonStyles = {\n placement: getBasePlacement(state.placement),\n variation: getVariation(state.placement),\n popper: state.elements.popper,\n popperRect: state.rects.popper,\n gpuAcceleration: gpuAcceleration,\n isFixed: state.options.strategy === 'fixed'\n };\n\n if (state.modifiersData.popperOffsets != null) {\n state.styles.popper = Object.assign({}, state.styles.popper, mapToStyles(Object.assign({}, commonStyles, {\n offsets: state.modifiersData.popperOffsets,\n position: state.options.strategy,\n adaptive: adaptive,\n roundOffsets: roundOffsets\n })));\n }\n\n if (state.modifiersData.arrow != null) {\n state.styles.arrow = Object.assign({}, state.styles.arrow, mapToStyles(Object.assign({}, commonStyles, {\n offsets: state.modifiersData.arrow,\n position: 'absolute',\n adaptive: false,\n roundOffsets: roundOffsets\n })));\n }\n\n state.attributes.popper = Object.assign({}, state.attributes.popper, {\n 'data-popper-placement': state.placement\n });\n} // eslint-disable-next-line import/no-unused-modules\n\n\nexport default {\n name: 'computeStyles',\n enabled: true,\n phase: 'beforeWrite',\n fn: computeStyles,\n data: {}\n};","import getWindow from \"../dom-utils/getWindow.js\"; // eslint-disable-next-line import/no-unused-modules\n\nvar passive = {\n passive: true\n};\n\nfunction effect(_ref) {\n var state = _ref.state,\n instance = _ref.instance,\n options = _ref.options;\n var _options$scroll = options.scroll,\n scroll = _options$scroll === void 0 ? true : _options$scroll,\n _options$resize = options.resize,\n resize = _options$resize === void 0 ? 
true : _options$resize;\n var window = getWindow(state.elements.popper);\n var scrollParents = [].concat(state.scrollParents.reference, state.scrollParents.popper);\n\n if (scroll) {\n scrollParents.forEach(function (scrollParent) {\n scrollParent.addEventListener('scroll', instance.update, passive);\n });\n }\n\n if (resize) {\n window.addEventListener('resize', instance.update, passive);\n }\n\n return function () {\n if (scroll) {\n scrollParents.forEach(function (scrollParent) {\n scrollParent.removeEventListener('scroll', instance.update, passive);\n });\n }\n\n if (resize) {\n window.removeEventListener('resize', instance.update, passive);\n }\n };\n} // eslint-disable-next-line import/no-unused-modules\n\n\nexport default {\n name: 'eventListeners',\n enabled: true,\n phase: 'write',\n fn: function fn() {},\n effect: effect,\n data: {}\n};","var hash = {\n left: 'right',\n right: 'left',\n bottom: 'top',\n top: 'bottom'\n};\nexport default function getOppositePlacement(placement) {\n return placement.replace(/left|right|bottom|top/g, function (matched) {\n return hash[matched];\n });\n}","var hash = {\n start: 'end',\n end: 'start'\n};\nexport default function getOppositeVariationPlacement(placement) {\n return placement.replace(/start|end/g, function (matched) {\n return hash[matched];\n });\n}","import getWindow from \"./getWindow.js\";\nexport default function getWindowScroll(node) {\n var win = getWindow(node);\n var scrollLeft = win.pageXOffset;\n var scrollTop = win.pageYOffset;\n return {\n scrollLeft: scrollLeft,\n scrollTop: scrollTop\n };\n}","import getBoundingClientRect from \"./getBoundingClientRect.js\";\nimport getDocumentElement from \"./getDocumentElement.js\";\nimport getWindowScroll from \"./getWindowScroll.js\";\nexport default function getWindowScrollBarX(element) {\n // If has a CSS width greater than the viewport, then this will be\n // incorrect for RTL.\n // Popper 1 is broken in this case and never had a bug report so let's assume\n // it's not an issue. I don't think anyone ever specifies width on \n // anyway.\n // Browsers where the left scrollbar doesn't cause an issue report `0` for\n // this (e.g. 
Edge 2019, IE11, Safari)\n return getBoundingClientRect(getDocumentElement(element)).left + getWindowScroll(element).scrollLeft;\n}","import getComputedStyle from \"./getComputedStyle.js\";\nexport default function isScrollParent(element) {\n // Firefox wants us to check `-x` and `-y` variations as well\n var _getComputedStyle = getComputedStyle(element),\n overflow = _getComputedStyle.overflow,\n overflowX = _getComputedStyle.overflowX,\n overflowY = _getComputedStyle.overflowY;\n\n return /auto|scroll|overlay|hidden/.test(overflow + overflowY + overflowX);\n}","import getParentNode from \"./getParentNode.js\";\nimport isScrollParent from \"./isScrollParent.js\";\nimport getNodeName from \"./getNodeName.js\";\nimport { isHTMLElement } from \"./instanceOf.js\";\nexport default function getScrollParent(node) {\n if (['html', 'body', '#document'].indexOf(getNodeName(node)) >= 0) {\n // $FlowFixMe[incompatible-return]: assume body is always available\n return node.ownerDocument.body;\n }\n\n if (isHTMLElement(node) && isScrollParent(node)) {\n return node;\n }\n\n return getScrollParent(getParentNode(node));\n}","import getScrollParent from \"./getScrollParent.js\";\nimport getParentNode from \"./getParentNode.js\";\nimport getWindow from \"./getWindow.js\";\nimport isScrollParent from \"./isScrollParent.js\";\n/*\ngiven a DOM element, return the list of all scroll parents, up the list of ancesors\nuntil we get to the top window object. This list is what we attach scroll listeners\nto, because if any of these parent elements scroll, we'll need to re-calculate the\nreference element's position.\n*/\n\nexport default function listScrollParents(element, list) {\n var _element$ownerDocumen;\n\n if (list === void 0) {\n list = [];\n }\n\n var scrollParent = getScrollParent(element);\n var isBody = scrollParent === ((_element$ownerDocumen = element.ownerDocument) == null ? void 0 : _element$ownerDocumen.body);\n var win = getWindow(scrollParent);\n var target = isBody ? [win].concat(win.visualViewport || [], isScrollParent(scrollParent) ? scrollParent : []) : scrollParent;\n var updatedList = list.concat(target);\n return isBody ? 
updatedList : // $FlowFixMe[incompatible-call]: isBody tells us target will be an HTMLElement here\n updatedList.concat(listScrollParents(getParentNode(target)));\n}","export default function rectToClientRect(rect) {\n return Object.assign({}, rect, {\n left: rect.x,\n top: rect.y,\n right: rect.x + rect.width,\n bottom: rect.y + rect.height\n });\n}","import { viewport } from \"../enums.js\";\nimport getViewportRect from \"./getViewportRect.js\";\nimport getDocumentRect from \"./getDocumentRect.js\";\nimport listScrollParents from \"./listScrollParents.js\";\nimport getOffsetParent from \"./getOffsetParent.js\";\nimport getDocumentElement from \"./getDocumentElement.js\";\nimport getComputedStyle from \"./getComputedStyle.js\";\nimport { isElement, isHTMLElement } from \"./instanceOf.js\";\nimport getBoundingClientRect from \"./getBoundingClientRect.js\";\nimport getParentNode from \"./getParentNode.js\";\nimport contains from \"./contains.js\";\nimport getNodeName from \"./getNodeName.js\";\nimport rectToClientRect from \"../utils/rectToClientRect.js\";\nimport { max, min } from \"../utils/math.js\";\n\nfunction getInnerBoundingClientRect(element, strategy) {\n var rect = getBoundingClientRect(element, false, strategy === 'fixed');\n rect.top = rect.top + element.clientTop;\n rect.left = rect.left + element.clientLeft;\n rect.bottom = rect.top + element.clientHeight;\n rect.right = rect.left + element.clientWidth;\n rect.width = element.clientWidth;\n rect.height = element.clientHeight;\n rect.x = rect.left;\n rect.y = rect.top;\n return rect;\n}\n\nfunction getClientRectFromMixedType(element, clippingParent, strategy) {\n return clippingParent === viewport ? rectToClientRect(getViewportRect(element, strategy)) : isElement(clippingParent) ? getInnerBoundingClientRect(clippingParent, strategy) : rectToClientRect(getDocumentRect(getDocumentElement(element)));\n} // A \"clipping parent\" is an overflowable container with the characteristic of\n// clipping (or hiding) overflowing elements with a position different from\n// `initial`\n\n\nfunction getClippingParents(element) {\n var clippingParents = listScrollParents(getParentNode(element));\n var canEscapeClipping = ['absolute', 'fixed'].indexOf(getComputedStyle(element).position) >= 0;\n var clipperElement = canEscapeClipping && isHTMLElement(element) ? getOffsetParent(element) : element;\n\n if (!isElement(clipperElement)) {\n return [];\n } // $FlowFixMe[incompatible-return]: https://github.com/facebook/flow/issues/1414\n\n\n return clippingParents.filter(function (clippingParent) {\n return isElement(clippingParent) && contains(clippingParent, clipperElement) && getNodeName(clippingParent) !== 'body';\n });\n} // Gets the maximum area that the element is visible in due to any number of\n// clipping parents\n\n\nexport default function getClippingRect(element, boundary, rootBoundary, strategy) {\n var mainClippingParents = boundary === 'clippingParents' ? 
getClippingParents(element) : [].concat(boundary);\n var clippingParents = [].concat(mainClippingParents, [rootBoundary]);\n var firstClippingParent = clippingParents[0];\n var clippingRect = clippingParents.reduce(function (accRect, clippingParent) {\n var rect = getClientRectFromMixedType(element, clippingParent, strategy);\n accRect.top = max(rect.top, accRect.top);\n accRect.right = min(rect.right, accRect.right);\n accRect.bottom = min(rect.bottom, accRect.bottom);\n accRect.left = max(rect.left, accRect.left);\n return accRect;\n }, getClientRectFromMixedType(element, firstClippingParent, strategy));\n clippingRect.width = clippingRect.right - clippingRect.left;\n clippingRect.height = clippingRect.bottom - clippingRect.top;\n clippingRect.x = clippingRect.left;\n clippingRect.y = clippingRect.top;\n return clippingRect;\n}","import getWindow from \"./getWindow.js\";\nimport getDocumentElement from \"./getDocumentElement.js\";\nimport getWindowScrollBarX from \"./getWindowScrollBarX.js\";\nimport isLayoutViewport from \"./isLayoutViewport.js\";\nexport default function getViewportRect(element, strategy) {\n var win = getWindow(element);\n var html = getDocumentElement(element);\n var visualViewport = win.visualViewport;\n var width = html.clientWidth;\n var height = html.clientHeight;\n var x = 0;\n var y = 0;\n\n if (visualViewport) {\n width = visualViewport.width;\n height = visualViewport.height;\n var layoutViewport = isLayoutViewport();\n\n if (layoutViewport || !layoutViewport && strategy === 'fixed') {\n x = visualViewport.offsetLeft;\n y = visualViewport.offsetTop;\n }\n }\n\n return {\n width: width,\n height: height,\n x: x + getWindowScrollBarX(element),\n y: y\n };\n}","import getDocumentElement from \"./getDocumentElement.js\";\nimport getComputedStyle from \"./getComputedStyle.js\";\nimport getWindowScrollBarX from \"./getWindowScrollBarX.js\";\nimport getWindowScroll from \"./getWindowScroll.js\";\nimport { max } from \"../utils/math.js\"; // Gets the entire size of the scrollable document area, even extending outside\n// of the `` and `` rect bounds if horizontally scrollable\n\nexport default function getDocumentRect(element) {\n var _element$ownerDocumen;\n\n var html = getDocumentElement(element);\n var winScroll = getWindowScroll(element);\n var body = (_element$ownerDocumen = element.ownerDocument) == null ? void 0 : _element$ownerDocumen.body;\n var width = max(html.scrollWidth, html.clientWidth, body ? body.scrollWidth : 0, body ? body.clientWidth : 0);\n var height = max(html.scrollHeight, html.clientHeight, body ? body.scrollHeight : 0, body ? body.clientHeight : 0);\n var x = -winScroll.scrollLeft + getWindowScrollBarX(element);\n var y = -winScroll.scrollTop;\n\n if (getComputedStyle(body || html).direction === 'rtl') {\n x += max(html.clientWidth, body ? body.clientWidth : 0) - width;\n }\n\n return {\n width: width,\n height: height,\n x: x,\n y: y\n };\n}","import getBasePlacement from \"./getBasePlacement.js\";\nimport getVariation from \"./getVariation.js\";\nimport getMainAxisFromPlacement from \"./getMainAxisFromPlacement.js\";\nimport { top, right, bottom, left, start, end } from \"../enums.js\";\nexport default function computeOffsets(_ref) {\n var reference = _ref.reference,\n element = _ref.element,\n placement = _ref.placement;\n var basePlacement = placement ? getBasePlacement(placement) : null;\n var variation = placement ? 
getVariation(placement) : null;\n var commonX = reference.x + reference.width / 2 - element.width / 2;\n var commonY = reference.y + reference.height / 2 - element.height / 2;\n var offsets;\n\n switch (basePlacement) {\n case top:\n offsets = {\n x: commonX,\n y: reference.y - element.height\n };\n break;\n\n case bottom:\n offsets = {\n x: commonX,\n y: reference.y + reference.height\n };\n break;\n\n case right:\n offsets = {\n x: reference.x + reference.width,\n y: commonY\n };\n break;\n\n case left:\n offsets = {\n x: reference.x - element.width,\n y: commonY\n };\n break;\n\n default:\n offsets = {\n x: reference.x,\n y: reference.y\n };\n }\n\n var mainAxis = basePlacement ? getMainAxisFromPlacement(basePlacement) : null;\n\n if (mainAxis != null) {\n var len = mainAxis === 'y' ? 'height' : 'width';\n\n switch (variation) {\n case start:\n offsets[mainAxis] = offsets[mainAxis] - (reference[len] / 2 - element[len] / 2);\n break;\n\n case end:\n offsets[mainAxis] = offsets[mainAxis] + (reference[len] / 2 - element[len] / 2);\n break;\n\n default:\n }\n }\n\n return offsets;\n}","import getClippingRect from \"../dom-utils/getClippingRect.js\";\nimport getDocumentElement from \"../dom-utils/getDocumentElement.js\";\nimport getBoundingClientRect from \"../dom-utils/getBoundingClientRect.js\";\nimport computeOffsets from \"./computeOffsets.js\";\nimport rectToClientRect from \"./rectToClientRect.js\";\nimport { clippingParents, reference, popper, bottom, top, right, basePlacements, viewport } from \"../enums.js\";\nimport { isElement } from \"../dom-utils/instanceOf.js\";\nimport mergePaddingObject from \"./mergePaddingObject.js\";\nimport expandToHashMap from \"./expandToHashMap.js\"; // eslint-disable-next-line import/no-unused-modules\n\nexport default function detectOverflow(state, options) {\n if (options === void 0) {\n options = {};\n }\n\n var _options = options,\n _options$placement = _options.placement,\n placement = _options$placement === void 0 ? state.placement : _options$placement,\n _options$strategy = _options.strategy,\n strategy = _options$strategy === void 0 ? state.strategy : _options$strategy,\n _options$boundary = _options.boundary,\n boundary = _options$boundary === void 0 ? clippingParents : _options$boundary,\n _options$rootBoundary = _options.rootBoundary,\n rootBoundary = _options$rootBoundary === void 0 ? viewport : _options$rootBoundary,\n _options$elementConte = _options.elementContext,\n elementContext = _options$elementConte === void 0 ? popper : _options$elementConte,\n _options$altBoundary = _options.altBoundary,\n altBoundary = _options$altBoundary === void 0 ? false : _options$altBoundary,\n _options$padding = _options.padding,\n padding = _options$padding === void 0 ? 0 : _options$padding;\n var paddingObject = mergePaddingObject(typeof padding !== 'number' ? padding : expandToHashMap(padding, basePlacements));\n var altContext = elementContext === popper ? reference : popper;\n var popperRect = state.rects.popper;\n var element = state.elements[altBoundary ? altContext : elementContext];\n var clippingClientRect = getClippingRect(isElement(element) ? 
element : element.contextElement || getDocumentElement(state.elements.popper), boundary, rootBoundary, strategy);\n var referenceClientRect = getBoundingClientRect(state.elements.reference);\n var popperOffsets = computeOffsets({\n reference: referenceClientRect,\n element: popperRect,\n strategy: 'absolute',\n placement: placement\n });\n var popperClientRect = rectToClientRect(Object.assign({}, popperRect, popperOffsets));\n var elementClientRect = elementContext === popper ? popperClientRect : referenceClientRect; // positive = overflowing the clipping rect\n // 0 or negative = within the clipping rect\n\n var overflowOffsets = {\n top: clippingClientRect.top - elementClientRect.top + paddingObject.top,\n bottom: elementClientRect.bottom - clippingClientRect.bottom + paddingObject.bottom,\n left: clippingClientRect.left - elementClientRect.left + paddingObject.left,\n right: elementClientRect.right - clippingClientRect.right + paddingObject.right\n };\n var offsetData = state.modifiersData.offset; // Offsets can be applied only to the popper element\n\n if (elementContext === popper && offsetData) {\n var offset = offsetData[placement];\n Object.keys(overflowOffsets).forEach(function (key) {\n var multiply = [right, bottom].indexOf(key) >= 0 ? 1 : -1;\n var axis = [top, bottom].indexOf(key) >= 0 ? 'y' : 'x';\n overflowOffsets[key] += offset[axis] * multiply;\n });\n }\n\n return overflowOffsets;\n}","import getOppositePlacement from \"../utils/getOppositePlacement.js\";\nimport getBasePlacement from \"../utils/getBasePlacement.js\";\nimport getOppositeVariationPlacement from \"../utils/getOppositeVariationPlacement.js\";\nimport detectOverflow from \"../utils/detectOverflow.js\";\nimport computeAutoPlacement from \"../utils/computeAutoPlacement.js\";\nimport { bottom, top, start, right, left, auto } from \"../enums.js\";\nimport getVariation from \"../utils/getVariation.js\"; // eslint-disable-next-line import/no-unused-modules\n\nfunction getExpandedFallbackPlacements(placement) {\n if (getBasePlacement(placement) === auto) {\n return [];\n }\n\n var oppositePlacement = getOppositePlacement(placement);\n return [getOppositeVariationPlacement(placement), oppositePlacement, getOppositeVariationPlacement(oppositePlacement)];\n}\n\nfunction flip(_ref) {\n var state = _ref.state,\n options = _ref.options,\n name = _ref.name;\n\n if (state.modifiersData[name]._skip) {\n return;\n }\n\n var _options$mainAxis = options.mainAxis,\n checkMainAxis = _options$mainAxis === void 0 ? true : _options$mainAxis,\n _options$altAxis = options.altAxis,\n checkAltAxis = _options$altAxis === void 0 ? true : _options$altAxis,\n specifiedFallbackPlacements = options.fallbackPlacements,\n padding = options.padding,\n boundary = options.boundary,\n rootBoundary = options.rootBoundary,\n altBoundary = options.altBoundary,\n _options$flipVariatio = options.flipVariations,\n flipVariations = _options$flipVariatio === void 0 ? true : _options$flipVariatio,\n allowedAutoPlacements = options.allowedAutoPlacements;\n var preferredPlacement = state.options.placement;\n var basePlacement = getBasePlacement(preferredPlacement);\n var isBasePlacement = basePlacement === preferredPlacement;\n var fallbackPlacements = specifiedFallbackPlacements || (isBasePlacement || !flipVariations ? 
[getOppositePlacement(preferredPlacement)] : getExpandedFallbackPlacements(preferredPlacement));\n var placements = [preferredPlacement].concat(fallbackPlacements).reduce(function (acc, placement) {\n return acc.concat(getBasePlacement(placement) === auto ? computeAutoPlacement(state, {\n placement: placement,\n boundary: boundary,\n rootBoundary: rootBoundary,\n padding: padding,\n flipVariations: flipVariations,\n allowedAutoPlacements: allowedAutoPlacements\n }) : placement);\n }, []);\n var referenceRect = state.rects.reference;\n var popperRect = state.rects.popper;\n var checksMap = new Map();\n var makeFallbackChecks = true;\n var firstFittingPlacement = placements[0];\n\n for (var i = 0; i < placements.length; i++) {\n var placement = placements[i];\n\n var _basePlacement = getBasePlacement(placement);\n\n var isStartVariation = getVariation(placement) === start;\n var isVertical = [top, bottom].indexOf(_basePlacement) >= 0;\n var len = isVertical ? 'width' : 'height';\n var overflow = detectOverflow(state, {\n placement: placement,\n boundary: boundary,\n rootBoundary: rootBoundary,\n altBoundary: altBoundary,\n padding: padding\n });\n var mainVariationSide = isVertical ? isStartVariation ? right : left : isStartVariation ? bottom : top;\n\n if (referenceRect[len] > popperRect[len]) {\n mainVariationSide = getOppositePlacement(mainVariationSide);\n }\n\n var altVariationSide = getOppositePlacement(mainVariationSide);\n var checks = [];\n\n if (checkMainAxis) {\n checks.push(overflow[_basePlacement] <= 0);\n }\n\n if (checkAltAxis) {\n checks.push(overflow[mainVariationSide] <= 0, overflow[altVariationSide] <= 0);\n }\n\n if (checks.every(function (check) {\n return check;\n })) {\n firstFittingPlacement = placement;\n makeFallbackChecks = false;\n break;\n }\n\n checksMap.set(placement, checks);\n }\n\n if (makeFallbackChecks) {\n // `2` may be desired in some cases – research later\n var numberOfChecks = flipVariations ? 3 : 1;\n\n var _loop = function _loop(_i) {\n var fittingPlacement = placements.find(function (placement) {\n var checks = checksMap.get(placement);\n\n if (checks) {\n return checks.slice(0, _i).every(function (check) {\n return check;\n });\n }\n });\n\n if (fittingPlacement) {\n firstFittingPlacement = fittingPlacement;\n return \"break\";\n }\n };\n\n for (var _i = numberOfChecks; _i > 0; _i--) {\n var _ret = _loop(_i);\n\n if (_ret === \"break\") break;\n }\n }\n\n if (state.placement !== firstFittingPlacement) {\n state.modifiersData[name]._skip = true;\n state.placement = firstFittingPlacement;\n state.reset = true;\n }\n} // eslint-disable-next-line import/no-unused-modules\n\n\nexport default {\n name: 'flip',\n enabled: true,\n phase: 'main',\n fn: flip,\n requiresIfExists: ['offset'],\n data: {\n _skip: false\n }\n};","import getVariation from \"./getVariation.js\";\nimport { variationPlacements, basePlacements, placements as allPlacements } from \"../enums.js\";\nimport detectOverflow from \"./detectOverflow.js\";\nimport getBasePlacement from \"./getBasePlacement.js\";\nexport default function computeAutoPlacement(state, options) {\n if (options === void 0) {\n options = {};\n }\n\n var _options = options,\n placement = _options.placement,\n boundary = _options.boundary,\n rootBoundary = _options.rootBoundary,\n padding = _options.padding,\n flipVariations = _options.flipVariations,\n _options$allowedAutoP = _options.allowedAutoPlacements,\n allowedAutoPlacements = _options$allowedAutoP === void 0 ? 
allPlacements : _options$allowedAutoP;\n var variation = getVariation(placement);\n var placements = variation ? flipVariations ? variationPlacements : variationPlacements.filter(function (placement) {\n return getVariation(placement) === variation;\n }) : basePlacements;\n var allowedPlacements = placements.filter(function (placement) {\n return allowedAutoPlacements.indexOf(placement) >= 0;\n });\n\n if (allowedPlacements.length === 0) {\n allowedPlacements = placements;\n } // $FlowFixMe[incompatible-type]: Flow seems to have problems with two array unions...\n\n\n var overflows = allowedPlacements.reduce(function (acc, placement) {\n acc[placement] = detectOverflow(state, {\n placement: placement,\n boundary: boundary,\n rootBoundary: rootBoundary,\n padding: padding\n })[getBasePlacement(placement)];\n return acc;\n }, {});\n return Object.keys(overflows).sort(function (a, b) {\n return overflows[a] - overflows[b];\n });\n}","import { top, bottom, left, right } from \"../enums.js\";\nimport detectOverflow from \"../utils/detectOverflow.js\";\n\nfunction getSideOffsets(overflow, rect, preventedOffsets) {\n if (preventedOffsets === void 0) {\n preventedOffsets = {\n x: 0,\n y: 0\n };\n }\n\n return {\n top: overflow.top - rect.height - preventedOffsets.y,\n right: overflow.right - rect.width + preventedOffsets.x,\n bottom: overflow.bottom - rect.height + preventedOffsets.y,\n left: overflow.left - rect.width - preventedOffsets.x\n };\n}\n\nfunction isAnySideFullyClipped(overflow) {\n return [top, right, bottom, left].some(function (side) {\n return overflow[side] >= 0;\n });\n}\n\nfunction hide(_ref) {\n var state = _ref.state,\n name = _ref.name;\n var referenceRect = state.rects.reference;\n var popperRect = state.rects.popper;\n var preventedOffsets = state.modifiersData.preventOverflow;\n var referenceOverflow = detectOverflow(state, {\n elementContext: 'reference'\n });\n var popperAltOverflow = detectOverflow(state, {\n altBoundary: true\n });\n var referenceClippingOffsets = getSideOffsets(referenceOverflow, referenceRect);\n var popperEscapeOffsets = getSideOffsets(popperAltOverflow, popperRect, preventedOffsets);\n var isReferenceHidden = isAnySideFullyClipped(referenceClippingOffsets);\n var hasPopperEscaped = isAnySideFullyClipped(popperEscapeOffsets);\n state.modifiersData[name] = {\n referenceClippingOffsets: referenceClippingOffsets,\n popperEscapeOffsets: popperEscapeOffsets,\n isReferenceHidden: isReferenceHidden,\n hasPopperEscaped: hasPopperEscaped\n };\n state.attributes.popper = Object.assign({}, state.attributes.popper, {\n 'data-popper-reference-hidden': isReferenceHidden,\n 'data-popper-escaped': hasPopperEscaped\n });\n} // eslint-disable-next-line import/no-unused-modules\n\n\nexport default {\n name: 'hide',\n enabled: true,\n phase: 'main',\n requiresIfExists: ['preventOverflow'],\n fn: hide\n};","import getBasePlacement from \"../utils/getBasePlacement.js\";\nimport { top, left, right, placements } from \"../enums.js\"; // eslint-disable-next-line import/no-unused-modules\n\nexport function distanceAndSkiddingToXY(placement, rects, offset) {\n var basePlacement = getBasePlacement(placement);\n var invertDistance = [left, top].indexOf(basePlacement) >= 0 ? -1 : 1;\n\n var _ref = typeof offset === 'function' ? offset(Object.assign({}, rects, {\n placement: placement\n })) : offset,\n skidding = _ref[0],\n distance = _ref[1];\n\n skidding = skidding || 0;\n distance = (distance || 0) * invertDistance;\n return [left, right].indexOf(basePlacement) >= 0 ? 
{\n x: distance,\n y: skidding\n } : {\n x: skidding,\n y: distance\n };\n}\n\nfunction offset(_ref2) {\n var state = _ref2.state,\n options = _ref2.options,\n name = _ref2.name;\n var _options$offset = options.offset,\n offset = _options$offset === void 0 ? [0, 0] : _options$offset;\n var data = placements.reduce(function (acc, placement) {\n acc[placement] = distanceAndSkiddingToXY(placement, state.rects, offset);\n return acc;\n }, {});\n var _data$state$placement = data[state.placement],\n x = _data$state$placement.x,\n y = _data$state$placement.y;\n\n if (state.modifiersData.popperOffsets != null) {\n state.modifiersData.popperOffsets.x += x;\n state.modifiersData.popperOffsets.y += y;\n }\n\n state.modifiersData[name] = data;\n} // eslint-disable-next-line import/no-unused-modules\n\n\nexport default {\n name: 'offset',\n enabled: true,\n phase: 'main',\n requires: ['popperOffsets'],\n fn: offset\n};","import computeOffsets from \"../utils/computeOffsets.js\";\n\nfunction popperOffsets(_ref) {\n var state = _ref.state,\n name = _ref.name;\n // Offsets are the actual position the popper needs to have to be\n // properly positioned near its reference element\n // This is the most basic placement, and will be adjusted by\n // the modifiers in the next step\n state.modifiersData[name] = computeOffsets({\n reference: state.rects.reference,\n element: state.rects.popper,\n strategy: 'absolute',\n placement: state.placement\n });\n} // eslint-disable-next-line import/no-unused-modules\n\n\nexport default {\n name: 'popperOffsets',\n enabled: true,\n phase: 'read',\n fn: popperOffsets,\n data: {}\n};","import { top, left, right, bottom, start } from \"../enums.js\";\nimport getBasePlacement from \"../utils/getBasePlacement.js\";\nimport getMainAxisFromPlacement from \"../utils/getMainAxisFromPlacement.js\";\nimport getAltAxis from \"../utils/getAltAxis.js\";\nimport { within, withinMaxClamp } from \"../utils/within.js\";\nimport getLayoutRect from \"../dom-utils/getLayoutRect.js\";\nimport getOffsetParent from \"../dom-utils/getOffsetParent.js\";\nimport detectOverflow from \"../utils/detectOverflow.js\";\nimport getVariation from \"../utils/getVariation.js\";\nimport getFreshSideObject from \"../utils/getFreshSideObject.js\";\nimport { min as mathMin, max as mathMax } from \"../utils/math.js\";\n\nfunction preventOverflow(_ref) {\n var state = _ref.state,\n options = _ref.options,\n name = _ref.name;\n var _options$mainAxis = options.mainAxis,\n checkMainAxis = _options$mainAxis === void 0 ? true : _options$mainAxis,\n _options$altAxis = options.altAxis,\n checkAltAxis = _options$altAxis === void 0 ? false : _options$altAxis,\n boundary = options.boundary,\n rootBoundary = options.rootBoundary,\n altBoundary = options.altBoundary,\n padding = options.padding,\n _options$tether = options.tether,\n tether = _options$tether === void 0 ? true : _options$tether,\n _options$tetherOffset = options.tetherOffset,\n tetherOffset = _options$tetherOffset === void 0 ? 
0 : _options$tetherOffset;\n var overflow = detectOverflow(state, {\n boundary: boundary,\n rootBoundary: rootBoundary,\n padding: padding,\n altBoundary: altBoundary\n });\n var basePlacement = getBasePlacement(state.placement);\n var variation = getVariation(state.placement);\n var isBasePlacement = !variation;\n var mainAxis = getMainAxisFromPlacement(basePlacement);\n var altAxis = getAltAxis(mainAxis);\n var popperOffsets = state.modifiersData.popperOffsets;\n var referenceRect = state.rects.reference;\n var popperRect = state.rects.popper;\n var tetherOffsetValue = typeof tetherOffset === 'function' ? tetherOffset(Object.assign({}, state.rects, {\n placement: state.placement\n })) : tetherOffset;\n var normalizedTetherOffsetValue = typeof tetherOffsetValue === 'number' ? {\n mainAxis: tetherOffsetValue,\n altAxis: tetherOffsetValue\n } : Object.assign({\n mainAxis: 0,\n altAxis: 0\n }, tetherOffsetValue);\n var offsetModifierState = state.modifiersData.offset ? state.modifiersData.offset[state.placement] : null;\n var data = {\n x: 0,\n y: 0\n };\n\n if (!popperOffsets) {\n return;\n }\n\n if (checkMainAxis) {\n var _offsetModifierState$;\n\n var mainSide = mainAxis === 'y' ? top : left;\n var altSide = mainAxis === 'y' ? bottom : right;\n var len = mainAxis === 'y' ? 'height' : 'width';\n var offset = popperOffsets[mainAxis];\n var min = offset + overflow[mainSide];\n var max = offset - overflow[altSide];\n var additive = tether ? -popperRect[len] / 2 : 0;\n var minLen = variation === start ? referenceRect[len] : popperRect[len];\n var maxLen = variation === start ? -popperRect[len] : -referenceRect[len]; // We need to include the arrow in the calculation so the arrow doesn't go\n // outside the reference bounds\n\n var arrowElement = state.elements.arrow;\n var arrowRect = tether && arrowElement ? getLayoutRect(arrowElement) : {\n width: 0,\n height: 0\n };\n var arrowPaddingObject = state.modifiersData['arrow#persistent'] ? state.modifiersData['arrow#persistent'].padding : getFreshSideObject();\n var arrowPaddingMin = arrowPaddingObject[mainSide];\n var arrowPaddingMax = arrowPaddingObject[altSide]; // If the reference length is smaller than the arrow length, we don't want\n // to include its full size in the calculation. If the reference is small\n // and near the edge of a boundary, the popper can overflow even if the\n // reference is not overflowing as well (e.g. virtual elements with no\n // width or height)\n\n var arrowLen = within(0, referenceRect[len], arrowRect[len]);\n var minOffset = isBasePlacement ? referenceRect[len] / 2 - additive - arrowLen - arrowPaddingMin - normalizedTetherOffsetValue.mainAxis : minLen - arrowLen - arrowPaddingMin - normalizedTetherOffsetValue.mainAxis;\n var maxOffset = isBasePlacement ? -referenceRect[len] / 2 + additive + arrowLen + arrowPaddingMax + normalizedTetherOffsetValue.mainAxis : maxLen + arrowLen + arrowPaddingMax + normalizedTetherOffsetValue.mainAxis;\n var arrowOffsetParent = state.elements.arrow && getOffsetParent(state.elements.arrow);\n var clientOffset = arrowOffsetParent ? mainAxis === 'y' ? arrowOffsetParent.clientTop || 0 : arrowOffsetParent.clientLeft || 0 : 0;\n var offsetModifierValue = (_offsetModifierState$ = offsetModifierState == null ? void 0 : offsetModifierState[mainAxis]) != null ? _offsetModifierState$ : 0;\n var tetherMin = offset + minOffset - offsetModifierValue - clientOffset;\n var tetherMax = offset + maxOffset - offsetModifierValue;\n var preventedOffset = within(tether ? 
mathMin(min, tetherMin) : min, offset, tether ? mathMax(max, tetherMax) : max);\n popperOffsets[mainAxis] = preventedOffset;\n data[mainAxis] = preventedOffset - offset;\n }\n\n if (checkAltAxis) {\n var _offsetModifierState$2;\n\n var _mainSide = mainAxis === 'x' ? top : left;\n\n var _altSide = mainAxis === 'x' ? bottom : right;\n\n var _offset = popperOffsets[altAxis];\n\n var _len = altAxis === 'y' ? 'height' : 'width';\n\n var _min = _offset + overflow[_mainSide];\n\n var _max = _offset - overflow[_altSide];\n\n var isOriginSide = [top, left].indexOf(basePlacement) !== -1;\n\n var _offsetModifierValue = (_offsetModifierState$2 = offsetModifierState == null ? void 0 : offsetModifierState[altAxis]) != null ? _offsetModifierState$2 : 0;\n\n var _tetherMin = isOriginSide ? _min : _offset - referenceRect[_len] - popperRect[_len] - _offsetModifierValue + normalizedTetherOffsetValue.altAxis;\n\n var _tetherMax = isOriginSide ? _offset + referenceRect[_len] + popperRect[_len] - _offsetModifierValue - normalizedTetherOffsetValue.altAxis : _max;\n\n var _preventedOffset = tether && isOriginSide ? withinMaxClamp(_tetherMin, _offset, _tetherMax) : within(tether ? _tetherMin : _min, _offset, tether ? _tetherMax : _max);\n\n popperOffsets[altAxis] = _preventedOffset;\n data[altAxis] = _preventedOffset - _offset;\n }\n\n state.modifiersData[name] = data;\n} // eslint-disable-next-line import/no-unused-modules\n\n\nexport default {\n name: 'preventOverflow',\n enabled: true,\n phase: 'main',\n fn: preventOverflow,\n requiresIfExists: ['offset']\n};","export default function getAltAxis(axis) {\n return axis === 'x' ? 'y' : 'x';\n}","import getBoundingClientRect from \"./getBoundingClientRect.js\";\nimport getNodeScroll from \"./getNodeScroll.js\";\nimport getNodeName from \"./getNodeName.js\";\nimport { isHTMLElement } from \"./instanceOf.js\";\nimport getWindowScrollBarX from \"./getWindowScrollBarX.js\";\nimport getDocumentElement from \"./getDocumentElement.js\";\nimport isScrollParent from \"./isScrollParent.js\";\nimport { round } from \"../utils/math.js\";\n\nfunction isElementScaled(element) {\n var rect = element.getBoundingClientRect();\n var scaleX = round(rect.width) / element.offsetWidth || 1;\n var scaleY = round(rect.height) / element.offsetHeight || 1;\n return scaleX !== 1 || scaleY !== 1;\n} // Returns the composite rect of an element relative to its offsetParent.\n// Composite means it takes into account transforms as well as layout.\n\n\nexport default function getCompositeRect(elementOrVirtualElement, offsetParent, isFixed) {\n if (isFixed === void 0) {\n isFixed = false;\n }\n\n var isOffsetParentAnElement = isHTMLElement(offsetParent);\n var offsetParentIsScaled = isHTMLElement(offsetParent) && isElementScaled(offsetParent);\n var documentElement = getDocumentElement(offsetParent);\n var rect = getBoundingClientRect(elementOrVirtualElement, offsetParentIsScaled, isFixed);\n var scroll = {\n scrollLeft: 0,\n scrollTop: 0\n };\n var offsets = {\n x: 0,\n y: 0\n };\n\n if (isOffsetParentAnElement || !isOffsetParentAnElement && !isFixed) {\n if (getNodeName(offsetParent) !== 'body' || // https://github.com/popperjs/popper-core/issues/1078\n isScrollParent(documentElement)) {\n scroll = getNodeScroll(offsetParent);\n }\n\n if (isHTMLElement(offsetParent)) {\n offsets = getBoundingClientRect(offsetParent, true);\n offsets.x += offsetParent.clientLeft;\n offsets.y += offsetParent.clientTop;\n } else if (documentElement) {\n offsets.x = getWindowScrollBarX(documentElement);\n }\n }\n\n 
return {\n x: rect.left + scroll.scrollLeft - offsets.x,\n y: rect.top + scroll.scrollTop - offsets.y,\n width: rect.width,\n height: rect.height\n };\n}","import getWindowScroll from \"./getWindowScroll.js\";\nimport getWindow from \"./getWindow.js\";\nimport { isHTMLElement } from \"./instanceOf.js\";\nimport getHTMLElementScroll from \"./getHTMLElementScroll.js\";\nexport default function getNodeScroll(node) {\n if (node === getWindow(node) || !isHTMLElement(node)) {\n return getWindowScroll(node);\n } else {\n return getHTMLElementScroll(node);\n }\n}","export default function getHTMLElementScroll(element) {\n return {\n scrollLeft: element.scrollLeft,\n scrollTop: element.scrollTop\n };\n}","import { modifierPhases } from \"../enums.js\"; // source: https://stackoverflow.com/questions/49875255\n\nfunction order(modifiers) {\n var map = new Map();\n var visited = new Set();\n var result = [];\n modifiers.forEach(function (modifier) {\n map.set(modifier.name, modifier);\n }); // On visiting object, check for its dependencies and visit them recursively\n\n function sort(modifier) {\n visited.add(modifier.name);\n var requires = [].concat(modifier.requires || [], modifier.requiresIfExists || []);\n requires.forEach(function (dep) {\n if (!visited.has(dep)) {\n var depModifier = map.get(dep);\n\n if (depModifier) {\n sort(depModifier);\n }\n }\n });\n result.push(modifier);\n }\n\n modifiers.forEach(function (modifier) {\n if (!visited.has(modifier.name)) {\n // check for visited object\n sort(modifier);\n }\n });\n return result;\n}\n\nexport default function orderModifiers(modifiers) {\n // order based on dependencies\n var orderedModifiers = order(modifiers); // order based on phase\n\n return modifierPhases.reduce(function (acc, phase) {\n return acc.concat(orderedModifiers.filter(function (modifier) {\n return modifier.phase === phase;\n }));\n }, []);\n}","import getCompositeRect from \"./dom-utils/getCompositeRect.js\";\nimport getLayoutRect from \"./dom-utils/getLayoutRect.js\";\nimport listScrollParents from \"./dom-utils/listScrollParents.js\";\nimport getOffsetParent from \"./dom-utils/getOffsetParent.js\";\nimport orderModifiers from \"./utils/orderModifiers.js\";\nimport debounce from \"./utils/debounce.js\";\nimport mergeByName from \"./utils/mergeByName.js\";\nimport detectOverflow from \"./utils/detectOverflow.js\";\nimport { isElement } from \"./dom-utils/instanceOf.js\";\nvar DEFAULT_OPTIONS = {\n placement: 'bottom',\n modifiers: [],\n strategy: 'absolute'\n};\n\nfunction areValidElements() {\n for (var _len = arguments.length, args = new Array(_len), _key = 0; _key < _len; _key++) {\n args[_key] = arguments[_key];\n }\n\n return !args.some(function (element) {\n return !(element && typeof element.getBoundingClientRect === 'function');\n });\n}\n\nexport function popperGenerator(generatorOptions) {\n if (generatorOptions === void 0) {\n generatorOptions = {};\n }\n\n var _generatorOptions = generatorOptions,\n _generatorOptions$def = _generatorOptions.defaultModifiers,\n defaultModifiers = _generatorOptions$def === void 0 ? [] : _generatorOptions$def,\n _generatorOptions$def2 = _generatorOptions.defaultOptions,\n defaultOptions = _generatorOptions$def2 === void 0 ? 
DEFAULT_OPTIONS : _generatorOptions$def2;\n return function createPopper(reference, popper, options) {\n if (options === void 0) {\n options = defaultOptions;\n }\n\n var state = {\n placement: 'bottom',\n orderedModifiers: [],\n options: Object.assign({}, DEFAULT_OPTIONS, defaultOptions),\n modifiersData: {},\n elements: {\n reference: reference,\n popper: popper\n },\n attributes: {},\n styles: {}\n };\n var effectCleanupFns = [];\n var isDestroyed = false;\n var instance = {\n state: state,\n setOptions: function setOptions(setOptionsAction) {\n var options = typeof setOptionsAction === 'function' ? setOptionsAction(state.options) : setOptionsAction;\n cleanupModifierEffects();\n state.options = Object.assign({}, defaultOptions, state.options, options);\n state.scrollParents = {\n reference: isElement(reference) ? listScrollParents(reference) : reference.contextElement ? listScrollParents(reference.contextElement) : [],\n popper: listScrollParents(popper)\n }; // Orders the modifiers based on their dependencies and `phase`\n // properties\n\n var orderedModifiers = orderModifiers(mergeByName([].concat(defaultModifiers, state.options.modifiers))); // Strip out disabled modifiers\n\n state.orderedModifiers = orderedModifiers.filter(function (m) {\n return m.enabled;\n });\n runModifierEffects();\n return instance.update();\n },\n // Sync update – it will always be executed, even if not necessary. This\n // is useful for low frequency updates where sync behavior simplifies the\n // logic.\n // For high frequency updates (e.g. `resize` and `scroll` events), always\n // prefer the async Popper#update method\n forceUpdate: function forceUpdate() {\n if (isDestroyed) {\n return;\n }\n\n var _state$elements = state.elements,\n reference = _state$elements.reference,\n popper = _state$elements.popper; // Don't proceed if `reference` or `popper` are not valid elements\n // anymore\n\n if (!areValidElements(reference, popper)) {\n return;\n } // Store the reference and popper rects to be read by modifiers\n\n\n state.rects = {\n reference: getCompositeRect(reference, getOffsetParent(popper), state.options.strategy === 'fixed'),\n popper: getLayoutRect(popper)\n }; // Modifiers have the ability to reset the current update cycle. The\n // most common use case for this is the `flip` modifier changing the\n // placement, which then needs to re-run all the modifiers, because the\n // logic was previously ran for the previous placement and is therefore\n // stale/incorrect\n\n state.reset = false;\n state.placement = state.options.placement; // On each update cycle, the `modifiersData` property for each modifier\n // is filled with the initial data specified by the modifier. This means\n // it doesn't persist and is fresh on each update.\n // To ensure persistent data, use `${name}#persistent`\n\n state.orderedModifiers.forEach(function (modifier) {\n return state.modifiersData[modifier.name] = Object.assign({}, modifier.data);\n });\n\n for (var index = 0; index < state.orderedModifiers.length; index++) {\n if (state.reset === true) {\n state.reset = false;\n index = -1;\n continue;\n }\n\n var _state$orderedModifie = state.orderedModifiers[index],\n fn = _state$orderedModifie.fn,\n _state$orderedModifie2 = _state$orderedModifie.options,\n _options = _state$orderedModifie2 === void 0 ? 
{} : _state$orderedModifie2,\n name = _state$orderedModifie.name;\n\n if (typeof fn === 'function') {\n state = fn({\n state: state,\n options: _options,\n name: name,\n instance: instance\n }) || state;\n }\n }\n },\n // Async and optimistically optimized update – it will not be executed if\n // not necessary (debounced to run at most once-per-tick)\n update: debounce(function () {\n return new Promise(function (resolve) {\n instance.forceUpdate();\n resolve(state);\n });\n }),\n destroy: function destroy() {\n cleanupModifierEffects();\n isDestroyed = true;\n }\n };\n\n if (!areValidElements(reference, popper)) {\n return instance;\n }\n\n instance.setOptions(options).then(function (state) {\n if (!isDestroyed && options.onFirstUpdate) {\n options.onFirstUpdate(state);\n }\n }); // Modifiers have the ability to execute arbitrary code before the first\n // update cycle runs. They will be executed in the same order as the update\n // cycle. This is useful when a modifier adds some persistent data that\n // other modifiers need to use, but the modifier is run after the dependent\n // one.\n\n function runModifierEffects() {\n state.orderedModifiers.forEach(function (_ref) {\n var name = _ref.name,\n _ref$options = _ref.options,\n options = _ref$options === void 0 ? {} : _ref$options,\n effect = _ref.effect;\n\n if (typeof effect === 'function') {\n var cleanupFn = effect({\n state: state,\n name: name,\n instance: instance,\n options: options\n });\n\n var noopFn = function noopFn() {};\n\n effectCleanupFns.push(cleanupFn || noopFn);\n }\n });\n }\n\n function cleanupModifierEffects() {\n effectCleanupFns.forEach(function (fn) {\n return fn();\n });\n effectCleanupFns = [];\n }\n\n return instance;\n };\n}\nexport var createPopper = /*#__PURE__*/popperGenerator(); // eslint-disable-next-line import/no-unused-modules\n\nexport { detectOverflow };","export default function debounce(fn) {\n var pending;\n return function () {\n if (!pending) {\n pending = new Promise(function (resolve) {\n Promise.resolve().then(function () {\n pending = undefined;\n resolve(fn());\n });\n });\n }\n\n return pending;\n };\n}","export default function mergeByName(modifiers) {\n var merged = modifiers.reduce(function (merged, current) {\n var existing = merged[current.name];\n merged[current.name] = existing ? 
Object.assign({}, existing, current, {\n options: Object.assign({}, existing.options, current.options),\n data: Object.assign({}, existing.data, current.data)\n }) : current;\n return merged;\n }, {}); // IE11 does not support Object.values\n\n return Object.keys(merged).map(function (key) {\n return merged[key];\n });\n}","import { popperGenerator, detectOverflow } from \"./createPopper.js\";\nimport eventListeners from \"./modifiers/eventListeners.js\";\nimport popperOffsets from \"./modifiers/popperOffsets.js\";\nimport computeStyles from \"./modifiers/computeStyles.js\";\nimport applyStyles from \"./modifiers/applyStyles.js\";\nimport offset from \"./modifiers/offset.js\";\nimport flip from \"./modifiers/flip.js\";\nimport preventOverflow from \"./modifiers/preventOverflow.js\";\nimport arrow from \"./modifiers/arrow.js\";\nimport hide from \"./modifiers/hide.js\";\nvar defaultModifiers = [eventListeners, popperOffsets, computeStyles, applyStyles, offset, flip, preventOverflow, arrow, hide];\nvar createPopper = /*#__PURE__*/popperGenerator({\n defaultModifiers: defaultModifiers\n}); // eslint-disable-next-line import/no-unused-modules\n\nexport { createPopper, popperGenerator, defaultModifiers, detectOverflow }; // eslint-disable-next-line import/no-unused-modules\n\nexport { createPopper as createPopperLite } from \"./popper-lite.js\"; // eslint-disable-next-line import/no-unused-modules\n\nexport * from \"./modifiers/index.js\";","import { popperGenerator, detectOverflow } from \"./createPopper.js\";\nimport eventListeners from \"./modifiers/eventListeners.js\";\nimport popperOffsets from \"./modifiers/popperOffsets.js\";\nimport computeStyles from \"./modifiers/computeStyles.js\";\nimport applyStyles from \"./modifiers/applyStyles.js\";\nvar defaultModifiers = [eventListeners, popperOffsets, computeStyles, applyStyles];\nvar createPopper = /*#__PURE__*/popperGenerator({\n defaultModifiers: defaultModifiers\n}); // eslint-disable-next-line import/no-unused-modules\n\nexport { createPopper, popperGenerator, defaultModifiers, detectOverflow };","/*!\n * Bootstrap v5.3.3 (https://getbootstrap.com/)\n * Copyright 2011-2024 The Bootstrap Authors (https://github.com/twbs/bootstrap/graphs/contributors)\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n */\nimport * as Popper from '@popperjs/core';\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap dom/data.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n\n/**\n * Constants\n */\n\nconst elementMap = new Map();\nconst Data = {\n set(element, key, instance) {\n if (!elementMap.has(element)) {\n elementMap.set(element, new Map());\n }\n const instanceMap = elementMap.get(element);\n\n // make it clear we only want one instance per element\n // can be removed later when multiple key/instances are fine to be used\n if (!instanceMap.has(key) && instanceMap.size !== 0) {\n // eslint-disable-next-line no-console\n console.error(`Bootstrap doesn't allow more than one instance per element. 
Bound instance: ${Array.from(instanceMap.keys())[0]}.`);\n return;\n }\n instanceMap.set(key, instance);\n },\n get(element, key) {\n if (elementMap.has(element)) {\n return elementMap.get(element).get(key) || null;\n }\n return null;\n },\n remove(element, key) {\n if (!elementMap.has(element)) {\n return;\n }\n const instanceMap = elementMap.get(element);\n instanceMap.delete(key);\n\n // free up element references if there are no instances left for an element\n if (instanceMap.size === 0) {\n elementMap.delete(element);\n }\n }\n};\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap util/index.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n\nconst MAX_UID = 1000000;\nconst MILLISECONDS_MULTIPLIER = 1000;\nconst TRANSITION_END = 'transitionend';\n\n/**\n * Properly escape IDs selectors to handle weird IDs\n * @param {string} selector\n * @returns {string}\n */\nconst parseSelector = selector => {\n if (selector && window.CSS && window.CSS.escape) {\n // document.querySelector needs escaping to handle IDs (html5+) containing for instance /\n selector = selector.replace(/#([^\\s\"#']+)/g, (match, id) => `#${CSS.escape(id)}`);\n }\n return selector;\n};\n\n// Shout-out Angus Croll (https://goo.gl/pxwQGp)\nconst toType = object => {\n if (object === null || object === undefined) {\n return `${object}`;\n }\n return Object.prototype.toString.call(object).match(/\\s([a-z]+)/i)[1].toLowerCase();\n};\n\n/**\n * Public Util API\n */\n\nconst getUID = prefix => {\n do {\n prefix += Math.floor(Math.random() * MAX_UID);\n } while (document.getElementById(prefix));\n return prefix;\n};\nconst getTransitionDurationFromElement = element => {\n if (!element) {\n return 0;\n }\n\n // Get transition-duration of the element\n let {\n transitionDuration,\n transitionDelay\n } = window.getComputedStyle(element);\n const floatTransitionDuration = Number.parseFloat(transitionDuration);\n const floatTransitionDelay = Number.parseFloat(transitionDelay);\n\n // Return 0 if element or transition duration is not found\n if (!floatTransitionDuration && !floatTransitionDelay) {\n return 0;\n }\n\n // If multiple durations are defined, take the first\n transitionDuration = transitionDuration.split(',')[0];\n transitionDelay = transitionDelay.split(',')[0];\n return (Number.parseFloat(transitionDuration) + Number.parseFloat(transitionDelay)) * MILLISECONDS_MULTIPLIER;\n};\nconst triggerTransitionEnd = element => {\n element.dispatchEvent(new Event(TRANSITION_END));\n};\nconst isElement = object => {\n if (!object || typeof object !== 'object') {\n return false;\n }\n if (typeof object.jquery !== 'undefined') {\n object = object[0];\n }\n return typeof object.nodeType !== 'undefined';\n};\nconst getElement = object => {\n // it's a jQuery object or a node element\n if (isElement(object)) {\n return object.jquery ? 
object[0] : object;\n }\n if (typeof object === 'string' && object.length > 0) {\n return document.querySelector(parseSelector(object));\n }\n return null;\n};\nconst isVisible = element => {\n if (!isElement(element) || element.getClientRects().length === 0) {\n return false;\n }\n const elementIsVisible = getComputedStyle(element).getPropertyValue('visibility') === 'visible';\n // Handle `details` element as its content may falsie appear visible when it is closed\n const closedDetails = element.closest('details:not([open])');\n if (!closedDetails) {\n return elementIsVisible;\n }\n if (closedDetails !== element) {\n const summary = element.closest('summary');\n if (summary && summary.parentNode !== closedDetails) {\n return false;\n }\n if (summary === null) {\n return false;\n }\n }\n return elementIsVisible;\n};\nconst isDisabled = element => {\n if (!element || element.nodeType !== Node.ELEMENT_NODE) {\n return true;\n }\n if (element.classList.contains('disabled')) {\n return true;\n }\n if (typeof element.disabled !== 'undefined') {\n return element.disabled;\n }\n return element.hasAttribute('disabled') && element.getAttribute('disabled') !== 'false';\n};\nconst findShadowRoot = element => {\n if (!document.documentElement.attachShadow) {\n return null;\n }\n\n // Can find the shadow root otherwise it'll return the document\n if (typeof element.getRootNode === 'function') {\n const root = element.getRootNode();\n return root instanceof ShadowRoot ? root : null;\n }\n if (element instanceof ShadowRoot) {\n return element;\n }\n\n // when we don't find a shadow root\n if (!element.parentNode) {\n return null;\n }\n return findShadowRoot(element.parentNode);\n};\nconst noop = () => {};\n\n/**\n * Trick to restart an element's animation\n *\n * @param {HTMLElement} element\n * @return void\n *\n * @see https://www.charistheo.io/blog/2021/02/restart-a-css-animation-with-javascript/#restarting-a-css-animation\n */\nconst reflow = element => {\n element.offsetHeight; // eslint-disable-line no-unused-expressions\n};\nconst getjQuery = () => {\n if (window.jQuery && !document.body.hasAttribute('data-bs-no-jquery')) {\n return window.jQuery;\n }\n return null;\n};\nconst DOMContentLoadedCallbacks = [];\nconst onDOMContentLoaded = callback => {\n if (document.readyState === 'loading') {\n // add listener on the first call when the document is in loading state\n if (!DOMContentLoadedCallbacks.length) {\n document.addEventListener('DOMContentLoaded', () => {\n for (const callback of DOMContentLoadedCallbacks) {\n callback();\n }\n });\n }\n DOMContentLoadedCallbacks.push(callback);\n } else {\n callback();\n }\n};\nconst isRTL = () => document.documentElement.dir === 'rtl';\nconst defineJQueryPlugin = plugin => {\n onDOMContentLoaded(() => {\n const $ = getjQuery();\n /* istanbul ignore if */\n if ($) {\n const name = plugin.NAME;\n const JQUERY_NO_CONFLICT = $.fn[name];\n $.fn[name] = plugin.jQueryInterface;\n $.fn[name].Constructor = plugin;\n $.fn[name].noConflict = () => {\n $.fn[name] = JQUERY_NO_CONFLICT;\n return plugin.jQueryInterface;\n };\n }\n });\n};\nconst execute = (possibleCallback, args = [], defaultValue = possibleCallback) => {\n return typeof possibleCallback === 'function' ? 
possibleCallback(...args) : defaultValue;\n};\nconst executeAfterTransition = (callback, transitionElement, waitForTransition = true) => {\n if (!waitForTransition) {\n execute(callback);\n return;\n }\n const durationPadding = 5;\n const emulatedDuration = getTransitionDurationFromElement(transitionElement) + durationPadding;\n let called = false;\n const handler = ({\n target\n }) => {\n if (target !== transitionElement) {\n return;\n }\n called = true;\n transitionElement.removeEventListener(TRANSITION_END, handler);\n execute(callback);\n };\n transitionElement.addEventListener(TRANSITION_END, handler);\n setTimeout(() => {\n if (!called) {\n triggerTransitionEnd(transitionElement);\n }\n }, emulatedDuration);\n};\n\n/**\n * Return the previous/next element of a list.\n *\n * @param {array} list The list of elements\n * @param activeElement The active element\n * @param shouldGetNext Choose to get next or previous element\n * @param isCycleAllowed\n * @return {Element|elem} The proper element\n */\nconst getNextActiveElement = (list, activeElement, shouldGetNext, isCycleAllowed) => {\n const listLength = list.length;\n let index = list.indexOf(activeElement);\n\n // if the element does not exist in the list return an element\n // depending on the direction and if cycle is allowed\n if (index === -1) {\n return !shouldGetNext && isCycleAllowed ? list[listLength - 1] : list[0];\n }\n index += shouldGetNext ? 1 : -1;\n if (isCycleAllowed) {\n index = (index + listLength) % listLength;\n }\n return list[Math.max(0, Math.min(index, listLength - 1))];\n};\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap dom/event-handler.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n\n\n/**\n * Constants\n */\n\nconst namespaceRegex = /[^.]*(?=\\..*)\\.|.*/;\nconst stripNameRegex = /\\..*/;\nconst stripUidRegex = /::\\d+$/;\nconst eventRegistry = {}; // Events storage\nlet uidEvent = 1;\nconst customEvents = {\n mouseenter: 'mouseover',\n mouseleave: 'mouseout'\n};\nconst nativeEvents = new Set(['click', 'dblclick', 'mouseup', 'mousedown', 'contextmenu', 'mousewheel', 'DOMMouseScroll', 'mouseover', 'mouseout', 'mousemove', 'selectstart', 'selectend', 'keydown', 'keypress', 'keyup', 'orientationchange', 'touchstart', 'touchmove', 'touchend', 'touchcancel', 'pointerdown', 'pointermove', 'pointerup', 'pointerleave', 'pointercancel', 'gesturestart', 'gesturechange', 'gestureend', 'focus', 'blur', 'change', 'reset', 'select', 'submit', 'focusin', 'focusout', 'load', 'unload', 'beforeunload', 'resize', 'move', 'DOMContentLoaded', 'readystatechange', 'error', 'abort', 'scroll']);\n\n/**\n * Private methods\n */\n\nfunction makeEventUid(element, uid) {\n return uid && `${uid}::${uidEvent++}` || element.uidEvent || uidEvent++;\n}\nfunction getElementEvents(element) {\n const uid = makeEventUid(element);\n element.uidEvent = uid;\n eventRegistry[uid] = eventRegistry[uid] || {};\n return eventRegistry[uid];\n}\nfunction bootstrapHandler(element, fn) {\n return function handler(event) {\n hydrateObj(event, {\n delegateTarget: element\n });\n if (handler.oneOff) {\n EventHandler.off(element, event.type, fn);\n }\n return fn.apply(element, [event]);\n };\n}\nfunction bootstrapDelegationHandler(element, selector, fn) {\n return function handler(event) {\n const domElements = element.querySelectorAll(selector);\n for (let {\n target\n } = event; target && 
target !== this; target = target.parentNode) {\n for (const domElement of domElements) {\n if (domElement !== target) {\n continue;\n }\n hydrateObj(event, {\n delegateTarget: target\n });\n if (handler.oneOff) {\n EventHandler.off(element, event.type, selector, fn);\n }\n return fn.apply(target, [event]);\n }\n }\n };\n}\nfunction findHandler(events, callable, delegationSelector = null) {\n return Object.values(events).find(event => event.callable === callable && event.delegationSelector === delegationSelector);\n}\nfunction normalizeParameters(originalTypeEvent, handler, delegationFunction) {\n const isDelegated = typeof handler === 'string';\n // TODO: tooltip passes `false` instead of selector, so we need to check\n const callable = isDelegated ? delegationFunction : handler || delegationFunction;\n let typeEvent = getTypeEvent(originalTypeEvent);\n if (!nativeEvents.has(typeEvent)) {\n typeEvent = originalTypeEvent;\n }\n return [isDelegated, callable, typeEvent];\n}\nfunction addHandler(element, originalTypeEvent, handler, delegationFunction, oneOff) {\n if (typeof originalTypeEvent !== 'string' || !element) {\n return;\n }\n let [isDelegated, callable, typeEvent] = normalizeParameters(originalTypeEvent, handler, delegationFunction);\n\n // in case of mouseenter or mouseleave wrap the handler within a function that checks for its DOM position\n // this prevents the handler from being dispatched the same way as mouseover or mouseout does\n if (originalTypeEvent in customEvents) {\n const wrapFunction = fn => {\n return function (event) {\n if (!event.relatedTarget || event.relatedTarget !== event.delegateTarget && !event.delegateTarget.contains(event.relatedTarget)) {\n return fn.call(this, event);\n }\n };\n };\n callable = wrapFunction(callable);\n }\n const events = getElementEvents(element);\n const handlers = events[typeEvent] || (events[typeEvent] = {});\n const previousFunction = findHandler(handlers, callable, isDelegated ? handler : null);\n if (previousFunction) {\n previousFunction.oneOff = previousFunction.oneOff && oneOff;\n return;\n }\n const uid = makeEventUid(callable, originalTypeEvent.replace(namespaceRegex, ''));\n const fn = isDelegated ? bootstrapDelegationHandler(element, handler, callable) : bootstrapHandler(element, callable);\n fn.delegationSelector = isDelegated ? 
handler : null;\n fn.callable = callable;\n fn.oneOff = oneOff;\n fn.uidEvent = uid;\n handlers[uid] = fn;\n element.addEventListener(typeEvent, fn, isDelegated);\n}\nfunction removeHandler(element, events, typeEvent, handler, delegationSelector) {\n const fn = findHandler(events[typeEvent], handler, delegationSelector);\n if (!fn) {\n return;\n }\n element.removeEventListener(typeEvent, fn, Boolean(delegationSelector));\n delete events[typeEvent][fn.uidEvent];\n}\nfunction removeNamespacedHandlers(element, events, typeEvent, namespace) {\n const storeElementEvent = events[typeEvent] || {};\n for (const [handlerKey, event] of Object.entries(storeElementEvent)) {\n if (handlerKey.includes(namespace)) {\n removeHandler(element, events, typeEvent, event.callable, event.delegationSelector);\n }\n }\n}\nfunction getTypeEvent(event) {\n // allow to get the native events from namespaced events ('click.bs.button' --> 'click')\n event = event.replace(stripNameRegex, '');\n return customEvents[event] || event;\n}\nconst EventHandler = {\n on(element, event, handler, delegationFunction) {\n addHandler(element, event, handler, delegationFunction, false);\n },\n one(element, event, handler, delegationFunction) {\n addHandler(element, event, handler, delegationFunction, true);\n },\n off(element, originalTypeEvent, handler, delegationFunction) {\n if (typeof originalTypeEvent !== 'string' || !element) {\n return;\n }\n const [isDelegated, callable, typeEvent] = normalizeParameters(originalTypeEvent, handler, delegationFunction);\n const inNamespace = typeEvent !== originalTypeEvent;\n const events = getElementEvents(element);\n const storeElementEvent = events[typeEvent] || {};\n const isNamespace = originalTypeEvent.startsWith('.');\n if (typeof callable !== 'undefined') {\n // Simplest case: handler is passed, remove that listener ONLY.\n if (!Object.keys(storeElementEvent).length) {\n return;\n }\n removeHandler(element, events, typeEvent, callable, isDelegated ? 
handler : null);\n return;\n }\n if (isNamespace) {\n for (const elementEvent of Object.keys(events)) {\n removeNamespacedHandlers(element, events, elementEvent, originalTypeEvent.slice(1));\n }\n }\n for (const [keyHandlers, event] of Object.entries(storeElementEvent)) {\n const handlerKey = keyHandlers.replace(stripUidRegex, '');\n if (!inNamespace || originalTypeEvent.includes(handlerKey)) {\n removeHandler(element, events, typeEvent, event.callable, event.delegationSelector);\n }\n }\n },\n trigger(element, event, args) {\n if (typeof event !== 'string' || !element) {\n return null;\n }\n const $ = getjQuery();\n const typeEvent = getTypeEvent(event);\n const inNamespace = event !== typeEvent;\n let jQueryEvent = null;\n let bubbles = true;\n let nativeDispatch = true;\n let defaultPrevented = false;\n if (inNamespace && $) {\n jQueryEvent = $.Event(event, args);\n $(element).trigger(jQueryEvent);\n bubbles = !jQueryEvent.isPropagationStopped();\n nativeDispatch = !jQueryEvent.isImmediatePropagationStopped();\n defaultPrevented = jQueryEvent.isDefaultPrevented();\n }\n const evt = hydrateObj(new Event(event, {\n bubbles,\n cancelable: true\n }), args);\n if (defaultPrevented) {\n evt.preventDefault();\n }\n if (nativeDispatch) {\n element.dispatchEvent(evt);\n }\n if (evt.defaultPrevented && jQueryEvent) {\n jQueryEvent.preventDefault();\n }\n return evt;\n }\n};\nfunction hydrateObj(obj, meta = {}) {\n for (const [key, value] of Object.entries(meta)) {\n try {\n obj[key] = value;\n } catch (_unused) {\n Object.defineProperty(obj, key, {\n configurable: true,\n get() {\n return value;\n }\n });\n }\n }\n return obj;\n}\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap dom/manipulator.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n\nfunction normalizeData(value) {\n if (value === 'true') {\n return true;\n }\n if (value === 'false') {\n return false;\n }\n if (value === Number(value).toString()) {\n return Number(value);\n }\n if (value === '' || value === 'null') {\n return null;\n }\n if (typeof value !== 'string') {\n return value;\n }\n try {\n return JSON.parse(decodeURIComponent(value));\n } catch (_unused) {\n return value;\n }\n}\nfunction normalizeDataKey(key) {\n return key.replace(/[A-Z]/g, chr => `-${chr.toLowerCase()}`);\n}\nconst Manipulator = {\n setDataAttribute(element, key, value) {\n element.setAttribute(`data-bs-${normalizeDataKey(key)}`, value);\n },\n removeDataAttribute(element, key) {\n element.removeAttribute(`data-bs-${normalizeDataKey(key)}`);\n },\n getDataAttributes(element) {\n if (!element) {\n return {};\n }\n const attributes = {};\n const bsKeys = Object.keys(element.dataset).filter(key => key.startsWith('bs') && !key.startsWith('bsConfig'));\n for (const key of bsKeys) {\n let pureKey = key.replace(/^bs/, '');\n pureKey = pureKey.charAt(0).toLowerCase() + pureKey.slice(1, pureKey.length);\n attributes[pureKey] = normalizeData(element.dataset[key]);\n }\n return attributes;\n },\n getDataAttribute(element, key) {\n return normalizeData(element.getAttribute(`data-bs-${normalizeDataKey(key)}`));\n }\n};\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap util/config.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n\n\n/**\n * 
Class definition\n */\n\nclass Config {\n // Getters\n static get Default() {\n return {};\n }\n static get DefaultType() {\n return {};\n }\n static get NAME() {\n throw new Error('You have to implement the static method \"NAME\", for each component!');\n }\n _getConfig(config) {\n config = this._mergeConfigObj(config);\n config = this._configAfterMerge(config);\n this._typeCheckConfig(config);\n return config;\n }\n _configAfterMerge(config) {\n return config;\n }\n _mergeConfigObj(config, element) {\n const jsonConfig = isElement(element) ? Manipulator.getDataAttribute(element, 'config') : {}; // try to parse\n\n return {\n ...this.constructor.Default,\n ...(typeof jsonConfig === 'object' ? jsonConfig : {}),\n ...(isElement(element) ? Manipulator.getDataAttributes(element) : {}),\n ...(typeof config === 'object' ? config : {})\n };\n }\n _typeCheckConfig(config, configTypes = this.constructor.DefaultType) {\n for (const [property, expectedTypes] of Object.entries(configTypes)) {\n const value = config[property];\n const valueType = isElement(value) ? 'element' : toType(value);\n if (!new RegExp(expectedTypes).test(valueType)) {\n throw new TypeError(`${this.constructor.NAME.toUpperCase()}: Option \"${property}\" provided type \"${valueType}\" but expected type \"${expectedTypes}\".`);\n }\n }\n }\n}\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap base-component.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n\n\n/**\n * Constants\n */\n\nconst VERSION = '5.3.3';\n\n/**\n * Class definition\n */\n\nclass BaseComponent extends Config {\n constructor(element, config) {\n super();\n element = getElement(element);\n if (!element) {\n return;\n }\n this._element = element;\n this._config = this._getConfig(config);\n Data.set(this._element, this.constructor.DATA_KEY, this);\n }\n\n // Public\n dispose() {\n Data.remove(this._element, this.constructor.DATA_KEY);\n EventHandler.off(this._element, this.constructor.EVENT_KEY);\n for (const propertyName of Object.getOwnPropertyNames(this)) {\n this[propertyName] = null;\n }\n }\n _queueCallback(callback, element, isAnimated = true) {\n executeAfterTransition(callback, element, isAnimated);\n }\n _getConfig(config) {\n config = this._mergeConfigObj(config, this._element);\n config = this._configAfterMerge(config);\n this._typeCheckConfig(config);\n return config;\n }\n\n // Static\n static getInstance(element) {\n return Data.get(getElement(element), this.DATA_KEY);\n }\n static getOrCreateInstance(element, config = {}) {\n return this.getInstance(element) || new this(element, typeof config === 'object' ? 
config : null);\n }\n static get VERSION() {\n return VERSION;\n }\n static get DATA_KEY() {\n return `bs.${this.NAME}`;\n }\n static get EVENT_KEY() {\n return `.${this.DATA_KEY}`;\n }\n static eventName(name) {\n return `${name}${this.EVENT_KEY}`;\n }\n}\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap dom/selector-engine.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n\nconst getSelector = element => {\n let selector = element.getAttribute('data-bs-target');\n if (!selector || selector === '#') {\n let hrefAttribute = element.getAttribute('href');\n\n // The only valid content that could double as a selector are IDs or classes,\n // so everything starting with `#` or `.`. If a \"real\" URL is used as the selector,\n // `document.querySelector` will rightfully complain it is invalid.\n // See https://github.com/twbs/bootstrap/issues/32273\n if (!hrefAttribute || !hrefAttribute.includes('#') && !hrefAttribute.startsWith('.')) {\n return null;\n }\n\n // Just in case some CMS puts out a full URL with the anchor appended\n if (hrefAttribute.includes('#') && !hrefAttribute.startsWith('#')) {\n hrefAttribute = `#${hrefAttribute.split('#')[1]}`;\n }\n selector = hrefAttribute && hrefAttribute !== '#' ? hrefAttribute.trim() : null;\n }\n return selector ? selector.split(',').map(sel => parseSelector(sel)).join(',') : null;\n};\nconst SelectorEngine = {\n find(selector, element = document.documentElement) {\n return [].concat(...Element.prototype.querySelectorAll.call(element, selector));\n },\n findOne(selector, element = document.documentElement) {\n return Element.prototype.querySelector.call(element, selector);\n },\n children(element, selector) {\n return [].concat(...element.children).filter(child => child.matches(selector));\n },\n parents(element, selector) {\n const parents = [];\n let ancestor = element.parentNode.closest(selector);\n while (ancestor) {\n parents.push(ancestor);\n ancestor = ancestor.parentNode.closest(selector);\n }\n return parents;\n },\n prev(element, selector) {\n let previous = element.previousElementSibling;\n while (previous) {\n if (previous.matches(selector)) {\n return [previous];\n }\n previous = previous.previousElementSibling;\n }\n return [];\n },\n // TODO: this is now unused; remove later along with prev()\n next(element, selector) {\n let next = element.nextElementSibling;\n while (next) {\n if (next.matches(selector)) {\n return [next];\n }\n next = next.nextElementSibling;\n }\n return [];\n },\n focusableChildren(element) {\n const focusables = ['a', 'button', 'input', 'textarea', 'select', 'details', '[tabindex]', '[contenteditable=\"true\"]'].map(selector => `${selector}:not([tabindex^=\"-\"])`).join(',');\n return this.find(focusables, element).filter(el => !isDisabled(el) && isVisible(el));\n },\n getSelectorFromElement(element) {\n const selector = getSelector(element);\n if (selector) {\n return SelectorEngine.findOne(selector) ? selector : null;\n }\n return null;\n },\n getElementFromSelector(element) {\n const selector = getSelector(element);\n return selector ? SelectorEngine.findOne(selector) : null;\n },\n getMultipleElementsFromSelector(element) {\n const selector = getSelector(element);\n return selector ? 
SelectorEngine.find(selector) : [];\n }\n};\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap util/component-functions.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n\nconst enableDismissTrigger = (component, method = 'hide') => {\n const clickEvent = `click.dismiss${component.EVENT_KEY}`;\n const name = component.NAME;\n EventHandler.on(document, clickEvent, `[data-bs-dismiss=\"${name}\"]`, function (event) {\n if (['A', 'AREA'].includes(this.tagName)) {\n event.preventDefault();\n }\n if (isDisabled(this)) {\n return;\n }\n const target = SelectorEngine.getElementFromSelector(this) || this.closest(`.${name}`);\n const instance = component.getOrCreateInstance(target);\n\n // Method argument is left, for Alert and only, as it doesn't implement the 'hide' method\n instance[method]();\n });\n};\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap alert.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n\n\n/**\n * Constants\n */\n\nconst NAME$f = 'alert';\nconst DATA_KEY$a = 'bs.alert';\nconst EVENT_KEY$b = `.${DATA_KEY$a}`;\nconst EVENT_CLOSE = `close${EVENT_KEY$b}`;\nconst EVENT_CLOSED = `closed${EVENT_KEY$b}`;\nconst CLASS_NAME_FADE$5 = 'fade';\nconst CLASS_NAME_SHOW$8 = 'show';\n\n/**\n * Class definition\n */\n\nclass Alert extends BaseComponent {\n // Getters\n static get NAME() {\n return NAME$f;\n }\n\n // Public\n close() {\n const closeEvent = EventHandler.trigger(this._element, EVENT_CLOSE);\n if (closeEvent.defaultPrevented) {\n return;\n }\n this._element.classList.remove(CLASS_NAME_SHOW$8);\n const isAnimated = this._element.classList.contains(CLASS_NAME_FADE$5);\n this._queueCallback(() => this._destroyElement(), this._element, isAnimated);\n }\n\n // Private\n _destroyElement() {\n this._element.remove();\n EventHandler.trigger(this._element, EVENT_CLOSED);\n this.dispose();\n }\n\n // Static\n static jQueryInterface(config) {\n return this.each(function () {\n const data = Alert.getOrCreateInstance(this);\n if (typeof config !== 'string') {\n return;\n }\n if (data[config] === undefined || config.startsWith('_') || config === 'constructor') {\n throw new TypeError(`No method named \"${config}\"`);\n }\n data[config](this);\n });\n }\n}\n\n/**\n * Data API implementation\n */\n\nenableDismissTrigger(Alert, 'close');\n\n/**\n * jQuery\n */\n\ndefineJQueryPlugin(Alert);\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap button.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n\n\n/**\n * Constants\n */\n\nconst NAME$e = 'button';\nconst DATA_KEY$9 = 'bs.button';\nconst EVENT_KEY$a = `.${DATA_KEY$9}`;\nconst DATA_API_KEY$6 = '.data-api';\nconst CLASS_NAME_ACTIVE$3 = 'active';\nconst SELECTOR_DATA_TOGGLE$5 = '[data-bs-toggle=\"button\"]';\nconst EVENT_CLICK_DATA_API$6 = `click${EVENT_KEY$a}${DATA_API_KEY$6}`;\n\n/**\n * Class definition\n */\n\nclass Button extends BaseComponent {\n // Getters\n static get NAME() {\n return NAME$e;\n }\n\n // Public\n toggle() {\n // Toggle class and sync the `aria-pressed` attribute with the return value of the `.toggle()` method\n 
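/*
 * Usage sketch for the Alert plugin defined above (illustrative; the markup and
 * id are assumptions). Declaratively, any element with data-bs-dismiss="alert"
 * inside an .alert closes it through enableDismissTrigger; programmatically:
 *
 *   const alertEl = document.querySelector('#demo-alert');  // e.g. <div class="alert fade show" id="demo-alert">
 *   alertEl.addEventListener('closed.bs.alert', () => console.log('alert removed from the DOM'));
 *   Alert.getOrCreateInstance(alertEl).close();  // fires close.bs.alert, waits for the fade, then disposes
 */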
this._element.setAttribute('aria-pressed', this._element.classList.toggle(CLASS_NAME_ACTIVE$3));\n }\n\n // Static\n static jQueryInterface(config) {\n return this.each(function () {\n const data = Button.getOrCreateInstance(this);\n if (config === 'toggle') {\n data[config]();\n }\n });\n }\n}\n\n/**\n * Data API implementation\n */\n\nEventHandler.on(document, EVENT_CLICK_DATA_API$6, SELECTOR_DATA_TOGGLE$5, event => {\n event.preventDefault();\n const button = event.target.closest(SELECTOR_DATA_TOGGLE$5);\n const data = Button.getOrCreateInstance(button);\n data.toggle();\n});\n\n/**\n * jQuery\n */\n\ndefineJQueryPlugin(Button);\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap util/swipe.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n\n\n/**\n * Constants\n */\n\nconst NAME$d = 'swipe';\nconst EVENT_KEY$9 = '.bs.swipe';\nconst EVENT_TOUCHSTART = `touchstart${EVENT_KEY$9}`;\nconst EVENT_TOUCHMOVE = `touchmove${EVENT_KEY$9}`;\nconst EVENT_TOUCHEND = `touchend${EVENT_KEY$9}`;\nconst EVENT_POINTERDOWN = `pointerdown${EVENT_KEY$9}`;\nconst EVENT_POINTERUP = `pointerup${EVENT_KEY$9}`;\nconst POINTER_TYPE_TOUCH = 'touch';\nconst POINTER_TYPE_PEN = 'pen';\nconst CLASS_NAME_POINTER_EVENT = 'pointer-event';\nconst SWIPE_THRESHOLD = 40;\nconst Default$c = {\n endCallback: null,\n leftCallback: null,\n rightCallback: null\n};\nconst DefaultType$c = {\n endCallback: '(function|null)',\n leftCallback: '(function|null)',\n rightCallback: '(function|null)'\n};\n\n/**\n * Class definition\n */\n\nclass Swipe extends Config {\n constructor(element, config) {\n super();\n this._element = element;\n if (!element || !Swipe.isSupported()) {\n return;\n }\n this._config = this._getConfig(config);\n this._deltaX = 0;\n this._supportPointerEvents = Boolean(window.PointerEvent);\n this._initEvents();\n }\n\n // Getters\n static get Default() {\n return Default$c;\n }\n static get DefaultType() {\n return DefaultType$c;\n }\n static get NAME() {\n return NAME$d;\n }\n\n // Public\n dispose() {\n EventHandler.off(this._element, EVENT_KEY$9);\n }\n\n // Private\n _start(event) {\n if (!this._supportPointerEvents) {\n this._deltaX = event.touches[0].clientX;\n return;\n }\n if (this._eventIsPointerPenTouch(event)) {\n this._deltaX = event.clientX;\n }\n }\n _end(event) {\n if (this._eventIsPointerPenTouch(event)) {\n this._deltaX = event.clientX - this._deltaX;\n }\n this._handleSwipe();\n execute(this._config.endCallback);\n }\n _move(event) {\n this._deltaX = event.touches && event.touches.length > 1 ? 0 : event.touches[0].clientX - this._deltaX;\n }\n _handleSwipe() {\n const absDeltaX = Math.abs(this._deltaX);\n if (absDeltaX <= SWIPE_THRESHOLD) {\n return;\n }\n const direction = absDeltaX / this._deltaX;\n this._deltaX = 0;\n if (!direction) {\n return;\n }\n execute(direction > 0 ? 
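/*
 * Usage sketch for the Button toggle plugin above (illustrative; the selector is
 * an assumption). The plugin only flips the `active` class and keeps
 * `aria-pressed` in sync, whether triggered by a data-bs-toggle="button" click
 * or programmatically:
 *
 *   const btn = document.querySelector('#demo-toggle');  // <button data-bs-toggle="button">
 *   Button.getOrCreateInstance(btn).toggle();
 *   console.log(btn.getAttribute('aria-pressed'));       // "true" after the first toggle
 */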
this._config.rightCallback : this._config.leftCallback);\n }\n _initEvents() {\n if (this._supportPointerEvents) {\n EventHandler.on(this._element, EVENT_POINTERDOWN, event => this._start(event));\n EventHandler.on(this._element, EVENT_POINTERUP, event => this._end(event));\n this._element.classList.add(CLASS_NAME_POINTER_EVENT);\n } else {\n EventHandler.on(this._element, EVENT_TOUCHSTART, event => this._start(event));\n EventHandler.on(this._element, EVENT_TOUCHMOVE, event => this._move(event));\n EventHandler.on(this._element, EVENT_TOUCHEND, event => this._end(event));\n }\n }\n _eventIsPointerPenTouch(event) {\n return this._supportPointerEvents && (event.pointerType === POINTER_TYPE_PEN || event.pointerType === POINTER_TYPE_TOUCH);\n }\n\n // Static\n static isSupported() {\n return 'ontouchstart' in document.documentElement || navigator.maxTouchPoints > 0;\n }\n}\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap carousel.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n\n\n/**\n * Constants\n */\n\nconst NAME$c = 'carousel';\nconst DATA_KEY$8 = 'bs.carousel';\nconst EVENT_KEY$8 = `.${DATA_KEY$8}`;\nconst DATA_API_KEY$5 = '.data-api';\nconst ARROW_LEFT_KEY$1 = 'ArrowLeft';\nconst ARROW_RIGHT_KEY$1 = 'ArrowRight';\nconst TOUCHEVENT_COMPAT_WAIT = 500; // Time for mouse compat events to fire after touch\n\nconst ORDER_NEXT = 'next';\nconst ORDER_PREV = 'prev';\nconst DIRECTION_LEFT = 'left';\nconst DIRECTION_RIGHT = 'right';\nconst EVENT_SLIDE = `slide${EVENT_KEY$8}`;\nconst EVENT_SLID = `slid${EVENT_KEY$8}`;\nconst EVENT_KEYDOWN$1 = `keydown${EVENT_KEY$8}`;\nconst EVENT_MOUSEENTER$1 = `mouseenter${EVENT_KEY$8}`;\nconst EVENT_MOUSELEAVE$1 = `mouseleave${EVENT_KEY$8}`;\nconst EVENT_DRAG_START = `dragstart${EVENT_KEY$8}`;\nconst EVENT_LOAD_DATA_API$3 = `load${EVENT_KEY$8}${DATA_API_KEY$5}`;\nconst EVENT_CLICK_DATA_API$5 = `click${EVENT_KEY$8}${DATA_API_KEY$5}`;\nconst CLASS_NAME_CAROUSEL = 'carousel';\nconst CLASS_NAME_ACTIVE$2 = 'active';\nconst CLASS_NAME_SLIDE = 'slide';\nconst CLASS_NAME_END = 'carousel-item-end';\nconst CLASS_NAME_START = 'carousel-item-start';\nconst CLASS_NAME_NEXT = 'carousel-item-next';\nconst CLASS_NAME_PREV = 'carousel-item-prev';\nconst SELECTOR_ACTIVE = '.active';\nconst SELECTOR_ITEM = '.carousel-item';\nconst SELECTOR_ACTIVE_ITEM = SELECTOR_ACTIVE + SELECTOR_ITEM;\nconst SELECTOR_ITEM_IMG = '.carousel-item img';\nconst SELECTOR_INDICATORS = '.carousel-indicators';\nconst SELECTOR_DATA_SLIDE = '[data-bs-slide], [data-bs-slide-to]';\nconst SELECTOR_DATA_RIDE = '[data-bs-ride=\"carousel\"]';\nconst KEY_TO_DIRECTION = {\n [ARROW_LEFT_KEY$1]: DIRECTION_RIGHT,\n [ARROW_RIGHT_KEY$1]: DIRECTION_LEFT\n};\nconst Default$b = {\n interval: 5000,\n keyboard: true,\n pause: 'hover',\n ride: false,\n touch: true,\n wrap: true\n};\nconst DefaultType$b = {\n interval: '(number|boolean)',\n // TODO:v6 remove boolean support\n keyboard: 'boolean',\n pause: '(string|boolean)',\n ride: '(boolean|string)',\n touch: 'boolean',\n wrap: 'boolean'\n};\n\n/**\n * Class definition\n */\n\nclass Carousel extends BaseComponent {\n constructor(element, config) {\n super(element, config);\n this._interval = null;\n this._activeElement = null;\n this._isSliding = false;\n this.touchTimeout = null;\n this._swipeHelper = null;\n this._indicatorsElement = SelectorEngine.findOne(SELECTOR_INDICATORS, this._element);\n 
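/*
 * Usage sketch for the internal Swipe helper completed above (illustrative; the
 * element and callbacks are assumptions). A gesture only counts once |deltaX|
 * exceeds SWIPE_THRESHOLD (40px); the sign of deltaX selects rightCallback vs
 * leftCallback, which is exactly how the carousel below consumes it.
 *
 *   if (Swipe.isSupported()) {
 *     const swipe = new Swipe(document.querySelector('#demo-pane'), {
 *       leftCallback: () => console.log('swiped left'),
 *       rightCallback: () => console.log('swiped right'),
 *       endCallback: () => console.log('gesture finished')
 *     });
 *     // swipe.dispose() removes the touch/pointer listeners again
 *   }
 */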
this._addEventListeners();\n if (this._config.ride === CLASS_NAME_CAROUSEL) {\n this.cycle();\n }\n }\n\n // Getters\n static get Default() {\n return Default$b;\n }\n static get DefaultType() {\n return DefaultType$b;\n }\n static get NAME() {\n return NAME$c;\n }\n\n // Public\n next() {\n this._slide(ORDER_NEXT);\n }\n nextWhenVisible() {\n // FIXME TODO use `document.visibilityState`\n // Don't call next when the page isn't visible\n // or the carousel or its parent isn't visible\n if (!document.hidden && isVisible(this._element)) {\n this.next();\n }\n }\n prev() {\n this._slide(ORDER_PREV);\n }\n pause() {\n if (this._isSliding) {\n triggerTransitionEnd(this._element);\n }\n this._clearInterval();\n }\n cycle() {\n this._clearInterval();\n this._updateInterval();\n this._interval = setInterval(() => this.nextWhenVisible(), this._config.interval);\n }\n _maybeEnableCycle() {\n if (!this._config.ride) {\n return;\n }\n if (this._isSliding) {\n EventHandler.one(this._element, EVENT_SLID, () => this.cycle());\n return;\n }\n this.cycle();\n }\n to(index) {\n const items = this._getItems();\n if (index > items.length - 1 || index < 0) {\n return;\n }\n if (this._isSliding) {\n EventHandler.one(this._element, EVENT_SLID, () => this.to(index));\n return;\n }\n const activeIndex = this._getItemIndex(this._getActive());\n if (activeIndex === index) {\n return;\n }\n const order = index > activeIndex ? ORDER_NEXT : ORDER_PREV;\n this._slide(order, items[index]);\n }\n dispose() {\n if (this._swipeHelper) {\n this._swipeHelper.dispose();\n }\n super.dispose();\n }\n\n // Private\n _configAfterMerge(config) {\n config.defaultInterval = config.interval;\n return config;\n }\n _addEventListeners() {\n if (this._config.keyboard) {\n EventHandler.on(this._element, EVENT_KEYDOWN$1, event => this._keydown(event));\n }\n if (this._config.pause === 'hover') {\n EventHandler.on(this._element, EVENT_MOUSEENTER$1, () => this.pause());\n EventHandler.on(this._element, EVENT_MOUSELEAVE$1, () => this._maybeEnableCycle());\n }\n if (this._config.touch && Swipe.isSupported()) {\n this._addTouchEventListeners();\n }\n }\n _addTouchEventListeners() {\n for (const img of SelectorEngine.find(SELECTOR_ITEM_IMG, this._element)) {\n EventHandler.on(img, EVENT_DRAG_START, event => event.preventDefault());\n }\n const endCallBack = () => {\n if (this._config.pause !== 'hover') {\n return;\n }\n\n // If it's a touch-enabled device, mouseenter/leave are fired as\n // part of the mouse compatibility events on first tap - the carousel\n // would stop cycling until user tapped out of it;\n // here, we listen for touchend, explicitly pause the carousel\n // (as if it's the second time we tap on it, mouseenter compat event\n // is NOT fired) and after a timeout (to allow for mouse compatibility\n // events to fire) we explicitly restart cycling\n\n this.pause();\n if (this.touchTimeout) {\n clearTimeout(this.touchTimeout);\n }\n this.touchTimeout = setTimeout(() => this._maybeEnableCycle(), TOUCHEVENT_COMPAT_WAIT + this._config.interval);\n };\n const swipeConfig = {\n leftCallback: () => this._slide(this._directionToOrder(DIRECTION_LEFT)),\n rightCallback: () => this._slide(this._directionToOrder(DIRECTION_RIGHT)),\n endCallback: endCallBack\n };\n this._swipeHelper = new Swipe(this._element, swipeConfig);\n }\n _keydown(event) {\n if (/input|textarea/i.test(event.target.tagName)) {\n return;\n }\n const direction = KEY_TO_DIRECTION[event.key];\n if (direction) {\n event.preventDefault();\n 
this._slide(this._directionToOrder(direction));\n }\n }\n _getItemIndex(element) {\n return this._getItems().indexOf(element);\n }\n _setActiveIndicatorElement(index) {\n if (!this._indicatorsElement) {\n return;\n }\n const activeIndicator = SelectorEngine.findOne(SELECTOR_ACTIVE, this._indicatorsElement);\n activeIndicator.classList.remove(CLASS_NAME_ACTIVE$2);\n activeIndicator.removeAttribute('aria-current');\n const newActiveIndicator = SelectorEngine.findOne(`[data-bs-slide-to=\"${index}\"]`, this._indicatorsElement);\n if (newActiveIndicator) {\n newActiveIndicator.classList.add(CLASS_NAME_ACTIVE$2);\n newActiveIndicator.setAttribute('aria-current', 'true');\n }\n }\n _updateInterval() {\n const element = this._activeElement || this._getActive();\n if (!element) {\n return;\n }\n const elementInterval = Number.parseInt(element.getAttribute('data-bs-interval'), 10);\n this._config.interval = elementInterval || this._config.defaultInterval;\n }\n _slide(order, element = null) {\n if (this._isSliding) {\n return;\n }\n const activeElement = this._getActive();\n const isNext = order === ORDER_NEXT;\n const nextElement = element || getNextActiveElement(this._getItems(), activeElement, isNext, this._config.wrap);\n if (nextElement === activeElement) {\n return;\n }\n const nextElementIndex = this._getItemIndex(nextElement);\n const triggerEvent = eventName => {\n return EventHandler.trigger(this._element, eventName, {\n relatedTarget: nextElement,\n direction: this._orderToDirection(order),\n from: this._getItemIndex(activeElement),\n to: nextElementIndex\n });\n };\n const slideEvent = triggerEvent(EVENT_SLIDE);\n if (slideEvent.defaultPrevented) {\n return;\n }\n if (!activeElement || !nextElement) {\n // Some weirdness is happening, so we bail\n // TODO: change tests that use empty divs to avoid this check\n return;\n }\n const isCycling = Boolean(this._interval);\n this.pause();\n this._isSliding = true;\n this._setActiveIndicatorElement(nextElementIndex);\n this._activeElement = nextElement;\n const directionalClassName = isNext ? CLASS_NAME_START : CLASS_NAME_END;\n const orderClassName = isNext ? CLASS_NAME_NEXT : CLASS_NAME_PREV;\n nextElement.classList.add(orderClassName);\n reflow(nextElement);\n activeElement.classList.add(directionalClassName);\n nextElement.classList.add(directionalClassName);\n const completeCallBack = () => {\n nextElement.classList.remove(directionalClassName, orderClassName);\n nextElement.classList.add(CLASS_NAME_ACTIVE$2);\n activeElement.classList.remove(CLASS_NAME_ACTIVE$2, orderClassName, directionalClassName);\n this._isSliding = false;\n triggerEvent(EVENT_SLID);\n };\n this._queueCallback(completeCallBack, activeElement, this._isAnimated());\n if (isCycling) {\n this.cycle();\n }\n }\n _isAnimated() {\n return this._element.classList.contains(CLASS_NAME_SLIDE);\n }\n _getActive() {\n return SelectorEngine.findOne(SELECTOR_ACTIVE_ITEM, this._element);\n }\n _getItems() {\n return SelectorEngine.find(SELECTOR_ITEM, this._element);\n }\n _clearInterval() {\n if (this._interval) {\n clearInterval(this._interval);\n this._interval = null;\n }\n }\n _directionToOrder(direction) {\n if (isRTL()) {\n return direction === DIRECTION_LEFT ? ORDER_PREV : ORDER_NEXT;\n }\n return direction === DIRECTION_LEFT ? ORDER_NEXT : ORDER_PREV;\n }\n _orderToDirection(order) {\n if (isRTL()) {\n return order === ORDER_PREV ? DIRECTION_LEFT : DIRECTION_RIGHT;\n }\n return order === ORDER_PREV ? 
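/*
 * Usage sketch for the Carousel plugin above (illustrative; the element id and
 * option values are assumptions). Note that _directionToOrder/_orderToDirection
 * swap left/right under isRTL(), so slide events always report the visual
 * direction.
 *
 *   const carouselEl = document.querySelector('#demo-carousel');
 *   const carousel = Carousel.getOrCreateInstance(carouselEl, { interval: 3000, wrap: true });
 *   carouselEl.addEventListener('slid.bs.carousel', event => {
 *     console.log(`moved ${event.direction}: ${event.from} -> ${event.to}`);
 *   });
 *   carousel.to(2);    // jump to the third item
 *   carousel.pause();  // stop the interval; cycle() restarts it
 */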
DIRECTION_RIGHT : DIRECTION_LEFT;\n }\n\n // Static\n static jQueryInterface(config) {\n return this.each(function () {\n const data = Carousel.getOrCreateInstance(this, config);\n if (typeof config === 'number') {\n data.to(config);\n return;\n }\n if (typeof config === 'string') {\n if (data[config] === undefined || config.startsWith('_') || config === 'constructor') {\n throw new TypeError(`No method named \"${config}\"`);\n }\n data[config]();\n }\n });\n }\n}\n\n/**\n * Data API implementation\n */\n\nEventHandler.on(document, EVENT_CLICK_DATA_API$5, SELECTOR_DATA_SLIDE, function (event) {\n const target = SelectorEngine.getElementFromSelector(this);\n if (!target || !target.classList.contains(CLASS_NAME_CAROUSEL)) {\n return;\n }\n event.preventDefault();\n const carousel = Carousel.getOrCreateInstance(target);\n const slideIndex = this.getAttribute('data-bs-slide-to');\n if (slideIndex) {\n carousel.to(slideIndex);\n carousel._maybeEnableCycle();\n return;\n }\n if (Manipulator.getDataAttribute(this, 'slide') === 'next') {\n carousel.next();\n carousel._maybeEnableCycle();\n return;\n }\n carousel.prev();\n carousel._maybeEnableCycle();\n});\nEventHandler.on(window, EVENT_LOAD_DATA_API$3, () => {\n const carousels = SelectorEngine.find(SELECTOR_DATA_RIDE);\n for (const carousel of carousels) {\n Carousel.getOrCreateInstance(carousel);\n }\n});\n\n/**\n * jQuery\n */\n\ndefineJQueryPlugin(Carousel);\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap collapse.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n\n\n/**\n * Constants\n */\n\nconst NAME$b = 'collapse';\nconst DATA_KEY$7 = 'bs.collapse';\nconst EVENT_KEY$7 = `.${DATA_KEY$7}`;\nconst DATA_API_KEY$4 = '.data-api';\nconst EVENT_SHOW$6 = `show${EVENT_KEY$7}`;\nconst EVENT_SHOWN$6 = `shown${EVENT_KEY$7}`;\nconst EVENT_HIDE$6 = `hide${EVENT_KEY$7}`;\nconst EVENT_HIDDEN$6 = `hidden${EVENT_KEY$7}`;\nconst EVENT_CLICK_DATA_API$4 = `click${EVENT_KEY$7}${DATA_API_KEY$4}`;\nconst CLASS_NAME_SHOW$7 = 'show';\nconst CLASS_NAME_COLLAPSE = 'collapse';\nconst CLASS_NAME_COLLAPSING = 'collapsing';\nconst CLASS_NAME_COLLAPSED = 'collapsed';\nconst CLASS_NAME_DEEPER_CHILDREN = `:scope .${CLASS_NAME_COLLAPSE} .${CLASS_NAME_COLLAPSE}`;\nconst CLASS_NAME_HORIZONTAL = 'collapse-horizontal';\nconst WIDTH = 'width';\nconst HEIGHT = 'height';\nconst SELECTOR_ACTIVES = '.collapse.show, .collapse.collapsing';\nconst SELECTOR_DATA_TOGGLE$4 = '[data-bs-toggle=\"collapse\"]';\nconst Default$a = {\n parent: null,\n toggle: true\n};\nconst DefaultType$a = {\n parent: '(null|element)',\n toggle: 'boolean'\n};\n\n/**\n * Class definition\n */\n\nclass Collapse extends BaseComponent {\n constructor(element, config) {\n super(element, config);\n this._isTransitioning = false;\n this._triggerArray = [];\n const toggleList = SelectorEngine.find(SELECTOR_DATA_TOGGLE$4);\n for (const elem of toggleList) {\n const selector = SelectorEngine.getSelectorFromElement(elem);\n const filterElement = SelectorEngine.find(selector).filter(foundElement => foundElement === this._element);\n if (selector !== null && filterElement.length) {\n this._triggerArray.push(elem);\n }\n }\n this._initializeChildren();\n if (!this._config.parent) {\n this._addAriaAndCollapsedClass(this._triggerArray, this._isShown());\n }\n if (this._config.toggle) {\n this.toggle();\n }\n }\n\n // Getters\n static get Default() {\n return 
Default$a;\n }\n static get DefaultType() {\n return DefaultType$a;\n }\n static get NAME() {\n return NAME$b;\n }\n\n // Public\n toggle() {\n if (this._isShown()) {\n this.hide();\n } else {\n this.show();\n }\n }\n show() {\n if (this._isTransitioning || this._isShown()) {\n return;\n }\n let activeChildren = [];\n\n // find active children\n if (this._config.parent) {\n activeChildren = this._getFirstLevelChildren(SELECTOR_ACTIVES).filter(element => element !== this._element).map(element => Collapse.getOrCreateInstance(element, {\n toggle: false\n }));\n }\n if (activeChildren.length && activeChildren[0]._isTransitioning) {\n return;\n }\n const startEvent = EventHandler.trigger(this._element, EVENT_SHOW$6);\n if (startEvent.defaultPrevented) {\n return;\n }\n for (const activeInstance of activeChildren) {\n activeInstance.hide();\n }\n const dimension = this._getDimension();\n this._element.classList.remove(CLASS_NAME_COLLAPSE);\n this._element.classList.add(CLASS_NAME_COLLAPSING);\n this._element.style[dimension] = 0;\n this._addAriaAndCollapsedClass(this._triggerArray, true);\n this._isTransitioning = true;\n const complete = () => {\n this._isTransitioning = false;\n this._element.classList.remove(CLASS_NAME_COLLAPSING);\n this._element.classList.add(CLASS_NAME_COLLAPSE, CLASS_NAME_SHOW$7);\n this._element.style[dimension] = '';\n EventHandler.trigger(this._element, EVENT_SHOWN$6);\n };\n const capitalizedDimension = dimension[0].toUpperCase() + dimension.slice(1);\n const scrollSize = `scroll${capitalizedDimension}`;\n this._queueCallback(complete, this._element, true);\n this._element.style[dimension] = `${this._element[scrollSize]}px`;\n }\n hide() {\n if (this._isTransitioning || !this._isShown()) {\n return;\n }\n const startEvent = EventHandler.trigger(this._element, EVENT_HIDE$6);\n if (startEvent.defaultPrevented) {\n return;\n }\n const dimension = this._getDimension();\n this._element.style[dimension] = `${this._element.getBoundingClientRect()[dimension]}px`;\n reflow(this._element);\n this._element.classList.add(CLASS_NAME_COLLAPSING);\n this._element.classList.remove(CLASS_NAME_COLLAPSE, CLASS_NAME_SHOW$7);\n for (const trigger of this._triggerArray) {\n const element = SelectorEngine.getElementFromSelector(trigger);\n if (element && !this._isShown(element)) {\n this._addAriaAndCollapsedClass([trigger], false);\n }\n }\n this._isTransitioning = true;\n const complete = () => {\n this._isTransitioning = false;\n this._element.classList.remove(CLASS_NAME_COLLAPSING);\n this._element.classList.add(CLASS_NAME_COLLAPSE);\n EventHandler.trigger(this._element, EVENT_HIDDEN$6);\n };\n this._element.style[dimension] = '';\n this._queueCallback(complete, this._element, true);\n }\n _isShown(element = this._element) {\n return element.classList.contains(CLASS_NAME_SHOW$7);\n }\n\n // Private\n _configAfterMerge(config) {\n config.toggle = Boolean(config.toggle); // Coerce string values\n config.parent = getElement(config.parent);\n return config;\n }\n _getDimension() {\n return this._element.classList.contains(CLASS_NAME_HORIZONTAL) ? 
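/*
 * Usage sketch for the Collapse plugin above (illustrative; the id is an
 * assumption). Passing { toggle: false } keeps the constructor from toggling
 * immediately, so the instance can be driven explicitly:
 *
 *   const collapseEl = document.querySelector('#demo-collapse');
 *   const collapse = Collapse.getOrCreateInstance(collapseEl, { toggle: false });
 *   collapseEl.addEventListener('shown.bs.collapse', () => console.log('fully expanded'));
 *   collapse.show();   // animates height (or width when .collapse-horizontal is set)
 *   collapse.hide();
 */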
WIDTH : HEIGHT;\n }\n _initializeChildren() {\n if (!this._config.parent) {\n return;\n }\n const children = this._getFirstLevelChildren(SELECTOR_DATA_TOGGLE$4);\n for (const element of children) {\n const selected = SelectorEngine.getElementFromSelector(element);\n if (selected) {\n this._addAriaAndCollapsedClass([element], this._isShown(selected));\n }\n }\n }\n _getFirstLevelChildren(selector) {\n const children = SelectorEngine.find(CLASS_NAME_DEEPER_CHILDREN, this._config.parent);\n // remove children if greater depth\n return SelectorEngine.find(selector, this._config.parent).filter(element => !children.includes(element));\n }\n _addAriaAndCollapsedClass(triggerArray, isOpen) {\n if (!triggerArray.length) {\n return;\n }\n for (const element of triggerArray) {\n element.classList.toggle(CLASS_NAME_COLLAPSED, !isOpen);\n element.setAttribute('aria-expanded', isOpen);\n }\n }\n\n // Static\n static jQueryInterface(config) {\n const _config = {};\n if (typeof config === 'string' && /show|hide/.test(config)) {\n _config.toggle = false;\n }\n return this.each(function () {\n const data = Collapse.getOrCreateInstance(this, _config);\n if (typeof config === 'string') {\n if (typeof data[config] === 'undefined') {\n throw new TypeError(`No method named \"${config}\"`);\n }\n data[config]();\n }\n });\n }\n}\n\n/**\n * Data API implementation\n */\n\nEventHandler.on(document, EVENT_CLICK_DATA_API$4, SELECTOR_DATA_TOGGLE$4, function (event) {\n // preventDefault only for elements (which change the URL) not inside the collapsible element\n if (event.target.tagName === 'A' || event.delegateTarget && event.delegateTarget.tagName === 'A') {\n event.preventDefault();\n }\n for (const element of SelectorEngine.getMultipleElementsFromSelector(this)) {\n Collapse.getOrCreateInstance(element, {\n toggle: false\n }).toggle();\n }\n});\n\n/**\n * jQuery\n */\n\ndefineJQueryPlugin(Collapse);\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap dropdown.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n\n\n/**\n * Constants\n */\n\nconst NAME$a = 'dropdown';\nconst DATA_KEY$6 = 'bs.dropdown';\nconst EVENT_KEY$6 = `.${DATA_KEY$6}`;\nconst DATA_API_KEY$3 = '.data-api';\nconst ESCAPE_KEY$2 = 'Escape';\nconst TAB_KEY$1 = 'Tab';\nconst ARROW_UP_KEY$1 = 'ArrowUp';\nconst ARROW_DOWN_KEY$1 = 'ArrowDown';\nconst RIGHT_MOUSE_BUTTON = 2; // MouseEvent.button value for the secondary button, usually the right button\n\nconst EVENT_HIDE$5 = `hide${EVENT_KEY$6}`;\nconst EVENT_HIDDEN$5 = `hidden${EVENT_KEY$6}`;\nconst EVENT_SHOW$5 = `show${EVENT_KEY$6}`;\nconst EVENT_SHOWN$5 = `shown${EVENT_KEY$6}`;\nconst EVENT_CLICK_DATA_API$3 = `click${EVENT_KEY$6}${DATA_API_KEY$3}`;\nconst EVENT_KEYDOWN_DATA_API = `keydown${EVENT_KEY$6}${DATA_API_KEY$3}`;\nconst EVENT_KEYUP_DATA_API = `keyup${EVENT_KEY$6}${DATA_API_KEY$3}`;\nconst CLASS_NAME_SHOW$6 = 'show';\nconst CLASS_NAME_DROPUP = 'dropup';\nconst CLASS_NAME_DROPEND = 'dropend';\nconst CLASS_NAME_DROPSTART = 'dropstart';\nconst CLASS_NAME_DROPUP_CENTER = 'dropup-center';\nconst CLASS_NAME_DROPDOWN_CENTER = 'dropdown-center';\nconst SELECTOR_DATA_TOGGLE$3 = '[data-bs-toggle=\"dropdown\"]:not(.disabled):not(:disabled)';\nconst SELECTOR_DATA_TOGGLE_SHOWN = `${SELECTOR_DATA_TOGGLE$3}.${CLASS_NAME_SHOW$6}`;\nconst SELECTOR_MENU = '.dropdown-menu';\nconst SELECTOR_NAVBAR = '.navbar';\nconst SELECTOR_NAVBAR_NAV = 
'.navbar-nav';\nconst SELECTOR_VISIBLE_ITEMS = '.dropdown-menu .dropdown-item:not(.disabled):not(:disabled)';\nconst PLACEMENT_TOP = isRTL() ? 'top-end' : 'top-start';\nconst PLACEMENT_TOPEND = isRTL() ? 'top-start' : 'top-end';\nconst PLACEMENT_BOTTOM = isRTL() ? 'bottom-end' : 'bottom-start';\nconst PLACEMENT_BOTTOMEND = isRTL() ? 'bottom-start' : 'bottom-end';\nconst PLACEMENT_RIGHT = isRTL() ? 'left-start' : 'right-start';\nconst PLACEMENT_LEFT = isRTL() ? 'right-start' : 'left-start';\nconst PLACEMENT_TOPCENTER = 'top';\nconst PLACEMENT_BOTTOMCENTER = 'bottom';\nconst Default$9 = {\n autoClose: true,\n boundary: 'clippingParents',\n display: 'dynamic',\n offset: [0, 2],\n popperConfig: null,\n reference: 'toggle'\n};\nconst DefaultType$9 = {\n autoClose: '(boolean|string)',\n boundary: '(string|element)',\n display: 'string',\n offset: '(array|string|function)',\n popperConfig: '(null|object|function)',\n reference: '(string|element|object)'\n};\n\n/**\n * Class definition\n */\n\nclass Dropdown extends BaseComponent {\n constructor(element, config) {\n super(element, config);\n this._popper = null;\n this._parent = this._element.parentNode; // dropdown wrapper\n // TODO: v6 revert #37011 & change markup https://getbootstrap.com/docs/5.3/forms/input-group/\n this._menu = SelectorEngine.next(this._element, SELECTOR_MENU)[0] || SelectorEngine.prev(this._element, SELECTOR_MENU)[0] || SelectorEngine.findOne(SELECTOR_MENU, this._parent);\n this._inNavbar = this._detectNavbar();\n }\n\n // Getters\n static get Default() {\n return Default$9;\n }\n static get DefaultType() {\n return DefaultType$9;\n }\n static get NAME() {\n return NAME$a;\n }\n\n // Public\n toggle() {\n return this._isShown() ? this.hide() : this.show();\n }\n show() {\n if (isDisabled(this._element) || this._isShown()) {\n return;\n }\n const relatedTarget = {\n relatedTarget: this._element\n };\n const showEvent = EventHandler.trigger(this._element, EVENT_SHOW$5, relatedTarget);\n if (showEvent.defaultPrevented) {\n return;\n }\n this._createPopper();\n\n // If this is a touch-enabled device we add extra\n // empty mouseover listeners to the body's immediate children;\n // only needed because of broken event delegation on iOS\n // https://www.quirksmode.org/blog/archives/2014/02/mouse_event_bub.html\n if ('ontouchstart' in document.documentElement && !this._parent.closest(SELECTOR_NAVBAR_NAV)) {\n for (const element of [].concat(...document.body.children)) {\n EventHandler.on(element, 'mouseover', noop);\n }\n }\n this._element.focus();\n this._element.setAttribute('aria-expanded', true);\n this._menu.classList.add(CLASS_NAME_SHOW$6);\n this._element.classList.add(CLASS_NAME_SHOW$6);\n EventHandler.trigger(this._element, EVENT_SHOWN$5, relatedTarget);\n }\n hide() {\n if (isDisabled(this._element) || !this._isShown()) {\n return;\n }\n const relatedTarget = {\n relatedTarget: this._element\n };\n this._completeHide(relatedTarget);\n }\n dispose() {\n if (this._popper) {\n this._popper.destroy();\n }\n super.dispose();\n }\n update() {\n this._inNavbar = this._detectNavbar();\n if (this._popper) {\n this._popper.update();\n }\n }\n\n // Private\n _completeHide(relatedTarget) {\n const hideEvent = EventHandler.trigger(this._element, EVENT_HIDE$5, relatedTarget);\n if (hideEvent.defaultPrevented) {\n return;\n }\n\n // If this is a touch-enabled device we remove the extra\n // empty mouseover listeners we added for iOS support\n if ('ontouchstart' in document.documentElement) {\n for (const element of 
[].concat(...document.body.children)) {\n EventHandler.off(element, 'mouseover', noop);\n }\n }\n if (this._popper) {\n this._popper.destroy();\n }\n this._menu.classList.remove(CLASS_NAME_SHOW$6);\n this._element.classList.remove(CLASS_NAME_SHOW$6);\n this._element.setAttribute('aria-expanded', 'false');\n Manipulator.removeDataAttribute(this._menu, 'popper');\n EventHandler.trigger(this._element, EVENT_HIDDEN$5, relatedTarget);\n }\n _getConfig(config) {\n config = super._getConfig(config);\n if (typeof config.reference === 'object' && !isElement(config.reference) && typeof config.reference.getBoundingClientRect !== 'function') {\n // Popper virtual elements require a getBoundingClientRect method\n throw new TypeError(`${NAME$a.toUpperCase()}: Option \"reference\" provided type \"object\" without a required \"getBoundingClientRect\" method.`);\n }\n return config;\n }\n _createPopper() {\n if (typeof Popper === 'undefined') {\n throw new TypeError('Bootstrap\\'s dropdowns require Popper (https://popper.js.org)');\n }\n let referenceElement = this._element;\n if (this._config.reference === 'parent') {\n referenceElement = this._parent;\n } else if (isElement(this._config.reference)) {\n referenceElement = getElement(this._config.reference);\n } else if (typeof this._config.reference === 'object') {\n referenceElement = this._config.reference;\n }\n const popperConfig = this._getPopperConfig();\n this._popper = Popper.createPopper(referenceElement, this._menu, popperConfig);\n }\n _isShown() {\n return this._menu.classList.contains(CLASS_NAME_SHOW$6);\n }\n _getPlacement() {\n const parentDropdown = this._parent;\n if (parentDropdown.classList.contains(CLASS_NAME_DROPEND)) {\n return PLACEMENT_RIGHT;\n }\n if (parentDropdown.classList.contains(CLASS_NAME_DROPSTART)) {\n return PLACEMENT_LEFT;\n }\n if (parentDropdown.classList.contains(CLASS_NAME_DROPUP_CENTER)) {\n return PLACEMENT_TOPCENTER;\n }\n if (parentDropdown.classList.contains(CLASS_NAME_DROPDOWN_CENTER)) {\n return PLACEMENT_BOTTOMCENTER;\n }\n\n // We need to trim the value because custom properties can also include spaces\n const isEnd = getComputedStyle(this._menu).getPropertyValue('--bs-position').trim() === 'end';\n if (parentDropdown.classList.contains(CLASS_NAME_DROPUP)) {\n return isEnd ? PLACEMENT_TOPEND : PLACEMENT_TOP;\n }\n return isEnd ? 
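/*
 * Usage sketch for the Dropdown plugin above (illustrative; the selector and
 * offset are assumptions). Dropdowns require Popper (show() throws otherwise);
 * inside a .navbar or with display: 'static' the applyStyles modifier is
 * disabled and positioning is left to CSS.
 *
 *   const toggle = document.querySelector('[data-bs-toggle="dropdown"]');
 *   const dropdown = Dropdown.getOrCreateInstance(toggle, {
 *     autoClose: 'outside',  // keep the menu open for clicks inside it
 *     offset: [0, 8]
 *   });
 *   dropdown.show();
 *   dropdown.update();       // re-run Popper after layout changes
 */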
PLACEMENT_BOTTOMEND : PLACEMENT_BOTTOM;\n }\n _detectNavbar() {\n return this._element.closest(SELECTOR_NAVBAR) !== null;\n }\n _getOffset() {\n const {\n offset\n } = this._config;\n if (typeof offset === 'string') {\n return offset.split(',').map(value => Number.parseInt(value, 10));\n }\n if (typeof offset === 'function') {\n return popperData => offset(popperData, this._element);\n }\n return offset;\n }\n _getPopperConfig() {\n const defaultBsPopperConfig = {\n placement: this._getPlacement(),\n modifiers: [{\n name: 'preventOverflow',\n options: {\n boundary: this._config.boundary\n }\n }, {\n name: 'offset',\n options: {\n offset: this._getOffset()\n }\n }]\n };\n\n // Disable Popper if we have a static display or Dropdown is in Navbar\n if (this._inNavbar || this._config.display === 'static') {\n Manipulator.setDataAttribute(this._menu, 'popper', 'static'); // TODO: v6 remove\n defaultBsPopperConfig.modifiers = [{\n name: 'applyStyles',\n enabled: false\n }];\n }\n return {\n ...defaultBsPopperConfig,\n ...execute(this._config.popperConfig, [defaultBsPopperConfig])\n };\n }\n _selectMenuItem({\n key,\n target\n }) {\n const items = SelectorEngine.find(SELECTOR_VISIBLE_ITEMS, this._menu).filter(element => isVisible(element));\n if (!items.length) {\n return;\n }\n\n // if target isn't included in items (e.g. when expanding the dropdown)\n // allow cycling to get the last item in case key equals ARROW_UP_KEY\n getNextActiveElement(items, target, key === ARROW_DOWN_KEY$1, !items.includes(target)).focus();\n }\n\n // Static\n static jQueryInterface(config) {\n return this.each(function () {\n const data = Dropdown.getOrCreateInstance(this, config);\n if (typeof config !== 'string') {\n return;\n }\n if (typeof data[config] === 'undefined') {\n throw new TypeError(`No method named \"${config}\"`);\n }\n data[config]();\n });\n }\n static clearMenus(event) {\n if (event.button === RIGHT_MOUSE_BUTTON || event.type === 'keyup' && event.key !== TAB_KEY$1) {\n return;\n }\n const openToggles = SelectorEngine.find(SELECTOR_DATA_TOGGLE_SHOWN);\n for (const toggle of openToggles) {\n const context = Dropdown.getInstance(toggle);\n if (!context || context._config.autoClose === false) {\n continue;\n }\n const composedPath = event.composedPath();\n const isMenuTarget = composedPath.includes(context._menu);\n if (composedPath.includes(context._element) || context._config.autoClose === 'inside' && !isMenuTarget || context._config.autoClose === 'outside' && isMenuTarget) {\n continue;\n }\n\n // Tab navigation through the dropdown menu or events from contained inputs shouldn't close the menu\n if (context._menu.contains(event.target) && (event.type === 'keyup' && event.key === TAB_KEY$1 || /input|select|option|textarea|form/i.test(event.target.tagName))) {\n continue;\n }\n const relatedTarget = {\n relatedTarget: context._element\n };\n if (event.type === 'click') {\n relatedTarget.clickEvent = event;\n }\n context._completeHide(relatedTarget);\n }\n }\n static dataApiKeydownHandler(event) {\n // If not an UP | DOWN | ESCAPE key => not a dropdown command\n // If input/textarea && if key is other than ESCAPE => not a dropdown command\n\n const isInput = /input|textarea/i.test(event.target.tagName);\n const isEscapeEvent = event.key === ESCAPE_KEY$2;\n const isUpOrDownEvent = [ARROW_UP_KEY$1, ARROW_DOWN_KEY$1].includes(event.key);\n if (!isUpOrDownEvent && !isEscapeEvent) {\n return;\n }\n if (isInput && !isEscapeEvent) {\n return;\n }\n event.preventDefault();\n\n // TODO: v6 revert #37011 & change 
markup https://getbootstrap.com/docs/5.3/forms/input-group/\n const getToggleButton = this.matches(SELECTOR_DATA_TOGGLE$3) ? this : SelectorEngine.prev(this, SELECTOR_DATA_TOGGLE$3)[0] || SelectorEngine.next(this, SELECTOR_DATA_TOGGLE$3)[0] || SelectorEngine.findOne(SELECTOR_DATA_TOGGLE$3, event.delegateTarget.parentNode);\n const instance = Dropdown.getOrCreateInstance(getToggleButton);\n if (isUpOrDownEvent) {\n event.stopPropagation();\n instance.show();\n instance._selectMenuItem(event);\n return;\n }\n if (instance._isShown()) {\n // else is escape and we check if it is shown\n event.stopPropagation();\n instance.hide();\n getToggleButton.focus();\n }\n }\n}\n\n/**\n * Data API implementation\n */\n\nEventHandler.on(document, EVENT_KEYDOWN_DATA_API, SELECTOR_DATA_TOGGLE$3, Dropdown.dataApiKeydownHandler);\nEventHandler.on(document, EVENT_KEYDOWN_DATA_API, SELECTOR_MENU, Dropdown.dataApiKeydownHandler);\nEventHandler.on(document, EVENT_CLICK_DATA_API$3, Dropdown.clearMenus);\nEventHandler.on(document, EVENT_KEYUP_DATA_API, Dropdown.clearMenus);\nEventHandler.on(document, EVENT_CLICK_DATA_API$3, SELECTOR_DATA_TOGGLE$3, function (event) {\n event.preventDefault();\n Dropdown.getOrCreateInstance(this).toggle();\n});\n\n/**\n * jQuery\n */\n\ndefineJQueryPlugin(Dropdown);\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap util/backdrop.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n\n\n/**\n * Constants\n */\n\nconst NAME$9 = 'backdrop';\nconst CLASS_NAME_FADE$4 = 'fade';\nconst CLASS_NAME_SHOW$5 = 'show';\nconst EVENT_MOUSEDOWN = `mousedown.bs.${NAME$9}`;\nconst Default$8 = {\n className: 'modal-backdrop',\n clickCallback: null,\n isAnimated: false,\n isVisible: true,\n // if false, we use the backdrop helper without adding any element to the dom\n rootElement: 'body' // give the choice to place backdrop under different elements\n};\nconst DefaultType$8 = {\n className: 'string',\n clickCallback: '(function|null)',\n isAnimated: 'boolean',\n isVisible: 'boolean',\n rootElement: '(element|string)'\n};\n\n/**\n * Class definition\n */\n\nclass Backdrop extends Config {\n constructor(config) {\n super();\n this._config = this._getConfig(config);\n this._isAppended = false;\n this._element = null;\n }\n\n // Getters\n static get Default() {\n return Default$8;\n }\n static get DefaultType() {\n return DefaultType$8;\n }\n static get NAME() {\n return NAME$9;\n }\n\n // Public\n show(callback) {\n if (!this._config.isVisible) {\n execute(callback);\n return;\n }\n this._append();\n const element = this._getElement();\n if (this._config.isAnimated) {\n reflow(element);\n }\n element.classList.add(CLASS_NAME_SHOW$5);\n this._emulateAnimation(() => {\n execute(callback);\n });\n }\n hide(callback) {\n if (!this._config.isVisible) {\n execute(callback);\n return;\n }\n this._getElement().classList.remove(CLASS_NAME_SHOW$5);\n this._emulateAnimation(() => {\n this.dispose();\n execute(callback);\n });\n }\n dispose() {\n if (!this._isAppended) {\n return;\n }\n EventHandler.off(this._element, EVENT_MOUSEDOWN);\n this._element.remove();\n this._isAppended = false;\n }\n\n // Private\n _getElement() {\n if (!this._element) {\n const backdrop = document.createElement('div');\n backdrop.className = this._config.className;\n if (this._config.isAnimated) {\n backdrop.classList.add(CLASS_NAME_FADE$4);\n }\n this._element = backdrop;\n }\n 
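/*
 * Usage sketch for the internal Backdrop helper above (illustrative; the option
 * values are assumptions). With isVisible: false it degenerates into a plain
 * callback runner, which is how Modal treats `backdrop: false`.
 *
 *   const backdrop = new Backdrop({
 *     className: 'modal-backdrop',
 *     isAnimated: true,
 *     rootElement: 'body',
 *     clickCallback: () => console.log('backdrop clicked')
 *   });
 *   backdrop.show(() => console.log('backdrop faded in'));
 *   backdrop.hide(() => console.log('backdrop removed'));  // dispose() runs inside hide()
 */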
return this._element;\n }\n _configAfterMerge(config) {\n // use getElement() with the default \"body\" to get a fresh Element on each instantiation\n config.rootElement = getElement(config.rootElement);\n return config;\n }\n _append() {\n if (this._isAppended) {\n return;\n }\n const element = this._getElement();\n this._config.rootElement.append(element);\n EventHandler.on(element, EVENT_MOUSEDOWN, () => {\n execute(this._config.clickCallback);\n });\n this._isAppended = true;\n }\n _emulateAnimation(callback) {\n executeAfterTransition(callback, this._getElement(), this._config.isAnimated);\n }\n}\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap util/focustrap.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n\n\n/**\n * Constants\n */\n\nconst NAME$8 = 'focustrap';\nconst DATA_KEY$5 = 'bs.focustrap';\nconst EVENT_KEY$5 = `.${DATA_KEY$5}`;\nconst EVENT_FOCUSIN$2 = `focusin${EVENT_KEY$5}`;\nconst EVENT_KEYDOWN_TAB = `keydown.tab${EVENT_KEY$5}`;\nconst TAB_KEY = 'Tab';\nconst TAB_NAV_FORWARD = 'forward';\nconst TAB_NAV_BACKWARD = 'backward';\nconst Default$7 = {\n autofocus: true,\n trapElement: null // The element to trap focus inside of\n};\nconst DefaultType$7 = {\n autofocus: 'boolean',\n trapElement: 'element'\n};\n\n/**\n * Class definition\n */\n\nclass FocusTrap extends Config {\n constructor(config) {\n super();\n this._config = this._getConfig(config);\n this._isActive = false;\n this._lastTabNavDirection = null;\n }\n\n // Getters\n static get Default() {\n return Default$7;\n }\n static get DefaultType() {\n return DefaultType$7;\n }\n static get NAME() {\n return NAME$8;\n }\n\n // Public\n activate() {\n if (this._isActive) {\n return;\n }\n if (this._config.autofocus) {\n this._config.trapElement.focus();\n }\n EventHandler.off(document, EVENT_KEY$5); // guard against infinite focus loop\n EventHandler.on(document, EVENT_FOCUSIN$2, event => this._handleFocusin(event));\n EventHandler.on(document, EVENT_KEYDOWN_TAB, event => this._handleKeydown(event));\n this._isActive = true;\n }\n deactivate() {\n if (!this._isActive) {\n return;\n }\n this._isActive = false;\n EventHandler.off(document, EVENT_KEY$5);\n }\n\n // Private\n _handleFocusin(event) {\n const {\n trapElement\n } = this._config;\n if (event.target === document || event.target === trapElement || trapElement.contains(event.target)) {\n return;\n }\n const elements = SelectorEngine.focusableChildren(trapElement);\n if (elements.length === 0) {\n trapElement.focus();\n } else if (this._lastTabNavDirection === TAB_NAV_BACKWARD) {\n elements[elements.length - 1].focus();\n } else {\n elements[0].focus();\n }\n }\n _handleKeydown(event) {\n if (event.key !== TAB_KEY) {\n return;\n }\n this._lastTabNavDirection = event.shiftKey ? 
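/*
 * Usage sketch for the internal FocusTrap helper above (illustrative; the
 * element is an assumption). While active, focusin events outside trapElement
 * are redirected to its first or last focusable child, depending on the last
 * Tab direction recorded by _handleKeydown.
 *
 *   const trap = new FocusTrap({ trapElement: document.querySelector('#demo-dialog') });
 *   trap.activate();    // focuses the dialog (autofocus: true) and starts listening on document
 *   trap.deactivate();  // removes the document-level listeners again
 */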
TAB_NAV_BACKWARD : TAB_NAV_FORWARD;\n }\n}\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap util/scrollBar.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n\n\n/**\n * Constants\n */\n\nconst SELECTOR_FIXED_CONTENT = '.fixed-top, .fixed-bottom, .is-fixed, .sticky-top';\nconst SELECTOR_STICKY_CONTENT = '.sticky-top';\nconst PROPERTY_PADDING = 'padding-right';\nconst PROPERTY_MARGIN = 'margin-right';\n\n/**\n * Class definition\n */\n\nclass ScrollBarHelper {\n constructor() {\n this._element = document.body;\n }\n\n // Public\n getWidth() {\n // https://developer.mozilla.org/en-US/docs/Web/API/Window/innerWidth#usage_notes\n const documentWidth = document.documentElement.clientWidth;\n return Math.abs(window.innerWidth - documentWidth);\n }\n hide() {\n const width = this.getWidth();\n this._disableOverFlow();\n // give padding to element to balance the hidden scrollbar width\n this._setElementAttributes(this._element, PROPERTY_PADDING, calculatedValue => calculatedValue + width);\n // trick: We adjust positive paddingRight and negative marginRight to sticky-top elements to keep showing fullwidth\n this._setElementAttributes(SELECTOR_FIXED_CONTENT, PROPERTY_PADDING, calculatedValue => calculatedValue + width);\n this._setElementAttributes(SELECTOR_STICKY_CONTENT, PROPERTY_MARGIN, calculatedValue => calculatedValue - width);\n }\n reset() {\n this._resetElementAttributes(this._element, 'overflow');\n this._resetElementAttributes(this._element, PROPERTY_PADDING);\n this._resetElementAttributes(SELECTOR_FIXED_CONTENT, PROPERTY_PADDING);\n this._resetElementAttributes(SELECTOR_STICKY_CONTENT, PROPERTY_MARGIN);\n }\n isOverflowing() {\n return this.getWidth() > 0;\n }\n\n // Private\n _disableOverFlow() {\n this._saveInitialAttribute(this._element, 'overflow');\n this._element.style.overflow = 'hidden';\n }\n _setElementAttributes(selector, styleProperty, callback) {\n const scrollbarWidth = this.getWidth();\n const manipulationCallBack = element => {\n if (element !== this._element && window.innerWidth > element.clientWidth + scrollbarWidth) {\n return;\n }\n this._saveInitialAttribute(element, styleProperty);\n const calculatedValue = window.getComputedStyle(element).getPropertyValue(styleProperty);\n element.style.setProperty(styleProperty, `${callback(Number.parseFloat(calculatedValue))}px`);\n };\n this._applyManipulationCallback(selector, manipulationCallBack);\n }\n _saveInitialAttribute(element, styleProperty) {\n const actualValue = element.style.getPropertyValue(styleProperty);\n if (actualValue) {\n Manipulator.setDataAttribute(element, styleProperty, actualValue);\n }\n }\n _resetElementAttributes(selector, styleProperty) {\n const manipulationCallBack = element => {\n const value = Manipulator.getDataAttribute(element, styleProperty);\n // We only want to remove the property if the value is `null`; the value can also be zero\n if (value === null) {\n element.style.removeProperty(styleProperty);\n return;\n }\n Manipulator.removeDataAttribute(element, styleProperty);\n element.style.setProperty(styleProperty, value);\n };\n this._applyManipulationCallback(selector, manipulationCallBack);\n }\n _applyManipulationCallback(selector, callBack) {\n if (isElement(selector)) {\n callBack(selector);\n return;\n }\n for (const sel of SelectorEngine.find(selector, this._element)) {\n callBack(sel);\n }\n }\n}\n\n/**\n * 
--------------------------------------------------------------------------\n * Bootstrap modal.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n\n\n/**\n * Constants\n */\n\nconst NAME$7 = 'modal';\nconst DATA_KEY$4 = 'bs.modal';\nconst EVENT_KEY$4 = `.${DATA_KEY$4}`;\nconst DATA_API_KEY$2 = '.data-api';\nconst ESCAPE_KEY$1 = 'Escape';\nconst EVENT_HIDE$4 = `hide${EVENT_KEY$4}`;\nconst EVENT_HIDE_PREVENTED$1 = `hidePrevented${EVENT_KEY$4}`;\nconst EVENT_HIDDEN$4 = `hidden${EVENT_KEY$4}`;\nconst EVENT_SHOW$4 = `show${EVENT_KEY$4}`;\nconst EVENT_SHOWN$4 = `shown${EVENT_KEY$4}`;\nconst EVENT_RESIZE$1 = `resize${EVENT_KEY$4}`;\nconst EVENT_CLICK_DISMISS = `click.dismiss${EVENT_KEY$4}`;\nconst EVENT_MOUSEDOWN_DISMISS = `mousedown.dismiss${EVENT_KEY$4}`;\nconst EVENT_KEYDOWN_DISMISS$1 = `keydown.dismiss${EVENT_KEY$4}`;\nconst EVENT_CLICK_DATA_API$2 = `click${EVENT_KEY$4}${DATA_API_KEY$2}`;\nconst CLASS_NAME_OPEN = 'modal-open';\nconst CLASS_NAME_FADE$3 = 'fade';\nconst CLASS_NAME_SHOW$4 = 'show';\nconst CLASS_NAME_STATIC = 'modal-static';\nconst OPEN_SELECTOR$1 = '.modal.show';\nconst SELECTOR_DIALOG = '.modal-dialog';\nconst SELECTOR_MODAL_BODY = '.modal-body';\nconst SELECTOR_DATA_TOGGLE$2 = '[data-bs-toggle=\"modal\"]';\nconst Default$6 = {\n backdrop: true,\n focus: true,\n keyboard: true\n};\nconst DefaultType$6 = {\n backdrop: '(boolean|string)',\n focus: 'boolean',\n keyboard: 'boolean'\n};\n\n/**\n * Class definition\n */\n\nclass Modal extends BaseComponent {\n constructor(element, config) {\n super(element, config);\n this._dialog = SelectorEngine.findOne(SELECTOR_DIALOG, this._element);\n this._backdrop = this._initializeBackDrop();\n this._focustrap = this._initializeFocusTrap();\n this._isShown = false;\n this._isTransitioning = false;\n this._scrollBar = new ScrollBarHelper();\n this._addEventListeners();\n }\n\n // Getters\n static get Default() {\n return Default$6;\n }\n static get DefaultType() {\n return DefaultType$6;\n }\n static get NAME() {\n return NAME$7;\n }\n\n // Public\n toggle(relatedTarget) {\n return this._isShown ? 
this.hide() : this.show(relatedTarget);\n }\n show(relatedTarget) {\n if (this._isShown || this._isTransitioning) {\n return;\n }\n const showEvent = EventHandler.trigger(this._element, EVENT_SHOW$4, {\n relatedTarget\n });\n if (showEvent.defaultPrevented) {\n return;\n }\n this._isShown = true;\n this._isTransitioning = true;\n this._scrollBar.hide();\n document.body.classList.add(CLASS_NAME_OPEN);\n this._adjustDialog();\n this._backdrop.show(() => this._showElement(relatedTarget));\n }\n hide() {\n if (!this._isShown || this._isTransitioning) {\n return;\n }\n const hideEvent = EventHandler.trigger(this._element, EVENT_HIDE$4);\n if (hideEvent.defaultPrevented) {\n return;\n }\n this._isShown = false;\n this._isTransitioning = true;\n this._focustrap.deactivate();\n this._element.classList.remove(CLASS_NAME_SHOW$4);\n this._queueCallback(() => this._hideModal(), this._element, this._isAnimated());\n }\n dispose() {\n EventHandler.off(window, EVENT_KEY$4);\n EventHandler.off(this._dialog, EVENT_KEY$4);\n this._backdrop.dispose();\n this._focustrap.deactivate();\n super.dispose();\n }\n handleUpdate() {\n this._adjustDialog();\n }\n\n // Private\n _initializeBackDrop() {\n return new Backdrop({\n isVisible: Boolean(this._config.backdrop),\n // 'static' option will be translated to true, and booleans will keep their value,\n isAnimated: this._isAnimated()\n });\n }\n _initializeFocusTrap() {\n return new FocusTrap({\n trapElement: this._element\n });\n }\n _showElement(relatedTarget) {\n // try to append dynamic modal\n if (!document.body.contains(this._element)) {\n document.body.append(this._element);\n }\n this._element.style.display = 'block';\n this._element.removeAttribute('aria-hidden');\n this._element.setAttribute('aria-modal', true);\n this._element.setAttribute('role', 'dialog');\n this._element.scrollTop = 0;\n const modalBody = SelectorEngine.findOne(SELECTOR_MODAL_BODY, this._dialog);\n if (modalBody) {\n modalBody.scrollTop = 0;\n }\n reflow(this._element);\n this._element.classList.add(CLASS_NAME_SHOW$4);\n const transitionComplete = () => {\n if (this._config.focus) {\n this._focustrap.activate();\n }\n this._isTransitioning = false;\n EventHandler.trigger(this._element, EVENT_SHOWN$4, {\n relatedTarget\n });\n };\n this._queueCallback(transitionComplete, this._dialog, this._isAnimated());\n }\n _addEventListeners() {\n EventHandler.on(this._element, EVENT_KEYDOWN_DISMISS$1, event => {\n if (event.key !== ESCAPE_KEY$1) {\n return;\n }\n if (this._config.keyboard) {\n this.hide();\n return;\n }\n this._triggerBackdropTransition();\n });\n EventHandler.on(window, EVENT_RESIZE$1, () => {\n if (this._isShown && !this._isTransitioning) {\n this._adjustDialog();\n }\n });\n EventHandler.on(this._element, EVENT_MOUSEDOWN_DISMISS, event => {\n // a bad trick to segregate clicks that may start inside dialog but end outside, and avoid listen to scrollbar clicks\n EventHandler.one(this._element, EVENT_CLICK_DISMISS, event2 => {\n if (this._element !== event.target || this._element !== event2.target) {\n return;\n }\n if (this._config.backdrop === 'static') {\n this._triggerBackdropTransition();\n return;\n }\n if (this._config.backdrop) {\n this.hide();\n }\n });\n });\n }\n _hideModal() {\n this._element.style.display = 'none';\n this._element.setAttribute('aria-hidden', true);\n this._element.removeAttribute('aria-modal');\n this._element.removeAttribute('role');\n this._isTransitioning = false;\n this._backdrop.hide(() => {\n document.body.classList.remove(CLASS_NAME_OPEN);\n 
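/*
 * Usage sketch for the Modal plugin above (illustrative; the id and options are
 * assumptions). With backdrop: 'static', outside clicks and Escape trigger the
 * hidePrevented.bs.modal "bounce" instead of closing.
 *
 *   const modalEl = document.querySelector('#demo-modal');
 *   const modal = Modal.getOrCreateInstance(modalEl, { backdrop: 'static', keyboard: false });
 *   modalEl.addEventListener('hidePrevented.bs.modal', () => console.log('dismiss blocked'));
 *   modalEl.addEventListener('hidden.bs.modal', () => console.log('modal closed'));
 *   modal.show();
 *   modal.handleUpdate();  // re-adjust padding if the modal's height changes while open
 */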
this._resetAdjustments();\n this._scrollBar.reset();\n EventHandler.trigger(this._element, EVENT_HIDDEN$4);\n });\n }\n _isAnimated() {\n return this._element.classList.contains(CLASS_NAME_FADE$3);\n }\n _triggerBackdropTransition() {\n const hideEvent = EventHandler.trigger(this._element, EVENT_HIDE_PREVENTED$1);\n if (hideEvent.defaultPrevented) {\n return;\n }\n const isModalOverflowing = this._element.scrollHeight > document.documentElement.clientHeight;\n const initialOverflowY = this._element.style.overflowY;\n // return if the following background transition hasn't yet completed\n if (initialOverflowY === 'hidden' || this._element.classList.contains(CLASS_NAME_STATIC)) {\n return;\n }\n if (!isModalOverflowing) {\n this._element.style.overflowY = 'hidden';\n }\n this._element.classList.add(CLASS_NAME_STATIC);\n this._queueCallback(() => {\n this._element.classList.remove(CLASS_NAME_STATIC);\n this._queueCallback(() => {\n this._element.style.overflowY = initialOverflowY;\n }, this._dialog);\n }, this._dialog);\n this._element.focus();\n }\n\n /**\n * The following methods are used to handle overflowing modals\n */\n\n _adjustDialog() {\n const isModalOverflowing = this._element.scrollHeight > document.documentElement.clientHeight;\n const scrollbarWidth = this._scrollBar.getWidth();\n const isBodyOverflowing = scrollbarWidth > 0;\n if (isBodyOverflowing && !isModalOverflowing) {\n const property = isRTL() ? 'paddingLeft' : 'paddingRight';\n this._element.style[property] = `${scrollbarWidth}px`;\n }\n if (!isBodyOverflowing && isModalOverflowing) {\n const property = isRTL() ? 'paddingRight' : 'paddingLeft';\n this._element.style[property] = `${scrollbarWidth}px`;\n }\n }\n _resetAdjustments() {\n this._element.style.paddingLeft = '';\n this._element.style.paddingRight = '';\n }\n\n // Static\n static jQueryInterface(config, relatedTarget) {\n return this.each(function () {\n const data = Modal.getOrCreateInstance(this, config);\n if (typeof config !== 'string') {\n return;\n }\n if (typeof data[config] === 'undefined') {\n throw new TypeError(`No method named \"${config}\"`);\n }\n data[config](relatedTarget);\n });\n }\n}\n\n/**\n * Data API implementation\n */\n\nEventHandler.on(document, EVENT_CLICK_DATA_API$2, SELECTOR_DATA_TOGGLE$2, function (event) {\n const target = SelectorEngine.getElementFromSelector(this);\n if (['A', 'AREA'].includes(this.tagName)) {\n event.preventDefault();\n }\n EventHandler.one(target, EVENT_SHOW$4, showEvent => {\n if (showEvent.defaultPrevented) {\n // only register focus restorer if modal will actually get shown\n return;\n }\n EventHandler.one(target, EVENT_HIDDEN$4, () => {\n if (isVisible(this)) {\n this.focus();\n }\n });\n });\n\n // avoid conflict when clicking modal toggler while another one is open\n const alreadyOpen = SelectorEngine.findOne(OPEN_SELECTOR$1);\n if (alreadyOpen) {\n Modal.getInstance(alreadyOpen).hide();\n }\n const data = Modal.getOrCreateInstance(target);\n data.toggle(this);\n});\nenableDismissTrigger(Modal);\n\n/**\n * jQuery\n */\n\ndefineJQueryPlugin(Modal);\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap offcanvas.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n\n\n/**\n * Constants\n */\n\nconst NAME$6 = 'offcanvas';\nconst DATA_KEY$3 = 'bs.offcanvas';\nconst EVENT_KEY$3 = `.${DATA_KEY$3}`;\nconst DATA_API_KEY$1 = '.data-api';\nconst 
EVENT_LOAD_DATA_API$2 = `load${EVENT_KEY$3}${DATA_API_KEY$1}`;\nconst ESCAPE_KEY = 'Escape';\nconst CLASS_NAME_SHOW$3 = 'show';\nconst CLASS_NAME_SHOWING$1 = 'showing';\nconst CLASS_NAME_HIDING = 'hiding';\nconst CLASS_NAME_BACKDROP = 'offcanvas-backdrop';\nconst OPEN_SELECTOR = '.offcanvas.show';\nconst EVENT_SHOW$3 = `show${EVENT_KEY$3}`;\nconst EVENT_SHOWN$3 = `shown${EVENT_KEY$3}`;\nconst EVENT_HIDE$3 = `hide${EVENT_KEY$3}`;\nconst EVENT_HIDE_PREVENTED = `hidePrevented${EVENT_KEY$3}`;\nconst EVENT_HIDDEN$3 = `hidden${EVENT_KEY$3}`;\nconst EVENT_RESIZE = `resize${EVENT_KEY$3}`;\nconst EVENT_CLICK_DATA_API$1 = `click${EVENT_KEY$3}${DATA_API_KEY$1}`;\nconst EVENT_KEYDOWN_DISMISS = `keydown.dismiss${EVENT_KEY$3}`;\nconst SELECTOR_DATA_TOGGLE$1 = '[data-bs-toggle=\"offcanvas\"]';\nconst Default$5 = {\n backdrop: true,\n keyboard: true,\n scroll: false\n};\nconst DefaultType$5 = {\n backdrop: '(boolean|string)',\n keyboard: 'boolean',\n scroll: 'boolean'\n};\n\n/**\n * Class definition\n */\n\nclass Offcanvas extends BaseComponent {\n constructor(element, config) {\n super(element, config);\n this._isShown = false;\n this._backdrop = this._initializeBackDrop();\n this._focustrap = this._initializeFocusTrap();\n this._addEventListeners();\n }\n\n // Getters\n static get Default() {\n return Default$5;\n }\n static get DefaultType() {\n return DefaultType$5;\n }\n static get NAME() {\n return NAME$6;\n }\n\n // Public\n toggle(relatedTarget) {\n return this._isShown ? this.hide() : this.show(relatedTarget);\n }\n show(relatedTarget) {\n if (this._isShown) {\n return;\n }\n const showEvent = EventHandler.trigger(this._element, EVENT_SHOW$3, {\n relatedTarget\n });\n if (showEvent.defaultPrevented) {\n return;\n }\n this._isShown = true;\n this._backdrop.show();\n if (!this._config.scroll) {\n new ScrollBarHelper().hide();\n }\n this._element.setAttribute('aria-modal', true);\n this._element.setAttribute('role', 'dialog');\n this._element.classList.add(CLASS_NAME_SHOWING$1);\n const completeCallBack = () => {\n if (!this._config.scroll || this._config.backdrop) {\n this._focustrap.activate();\n }\n this._element.classList.add(CLASS_NAME_SHOW$3);\n this._element.classList.remove(CLASS_NAME_SHOWING$1);\n EventHandler.trigger(this._element, EVENT_SHOWN$3, {\n relatedTarget\n });\n };\n this._queueCallback(completeCallBack, this._element, true);\n }\n hide() {\n if (!this._isShown) {\n return;\n }\n const hideEvent = EventHandler.trigger(this._element, EVENT_HIDE$3);\n if (hideEvent.defaultPrevented) {\n return;\n }\n this._focustrap.deactivate();\n this._element.blur();\n this._isShown = false;\n this._element.classList.add(CLASS_NAME_HIDING);\n this._backdrop.hide();\n const completeCallback = () => {\n this._element.classList.remove(CLASS_NAME_SHOW$3, CLASS_NAME_HIDING);\n this._element.removeAttribute('aria-modal');\n this._element.removeAttribute('role');\n if (!this._config.scroll) {\n new ScrollBarHelper().reset();\n }\n EventHandler.trigger(this._element, EVENT_HIDDEN$3);\n };\n this._queueCallback(completeCallback, this._element, true);\n }\n dispose() {\n this._backdrop.dispose();\n this._focustrap.deactivate();\n super.dispose();\n }\n\n // Private\n _initializeBackDrop() {\n const clickCallback = () => {\n if (this._config.backdrop === 'static') {\n EventHandler.trigger(this._element, EVENT_HIDE_PREVENTED);\n return;\n }\n this.hide();\n };\n\n // 'static' option will be translated to true, and booleans will keep their value\n const isVisible = Boolean(this._config.backdrop);\n return 
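/*
 * Usage sketch for the Offcanvas plugin above (illustrative; the id and options
 * are assumptions). scroll: false hides the body scrollbar via ScrollBarHelper
 * while the panel is open; backdrop: 'static' emits hidePrevented.bs.offcanvas
 * instead of closing on backdrop clicks.
 *
 *   const panelEl = document.querySelector('#demo-offcanvas');
 *   const panel = Offcanvas.getOrCreateInstance(panelEl, { backdrop: true, scroll: false });
 *   panelEl.addEventListener('hidden.bs.offcanvas', () => console.log('panel closed'));
 *   panel.show();
 *   panel.hide();
 */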
new Backdrop({\n className: CLASS_NAME_BACKDROP,\n isVisible,\n isAnimated: true,\n rootElement: this._element.parentNode,\n clickCallback: isVisible ? clickCallback : null\n });\n }\n _initializeFocusTrap() {\n return new FocusTrap({\n trapElement: this._element\n });\n }\n _addEventListeners() {\n EventHandler.on(this._element, EVENT_KEYDOWN_DISMISS, event => {\n if (event.key !== ESCAPE_KEY) {\n return;\n }\n if (this._config.keyboard) {\n this.hide();\n return;\n }\n EventHandler.trigger(this._element, EVENT_HIDE_PREVENTED);\n });\n }\n\n // Static\n static jQueryInterface(config) {\n return this.each(function () {\n const data = Offcanvas.getOrCreateInstance(this, config);\n if (typeof config !== 'string') {\n return;\n }\n if (data[config] === undefined || config.startsWith('_') || config === 'constructor') {\n throw new TypeError(`No method named \"${config}\"`);\n }\n data[config](this);\n });\n }\n}\n\n/**\n * Data API implementation\n */\n\nEventHandler.on(document, EVENT_CLICK_DATA_API$1, SELECTOR_DATA_TOGGLE$1, function (event) {\n const target = SelectorEngine.getElementFromSelector(this);\n if (['A', 'AREA'].includes(this.tagName)) {\n event.preventDefault();\n }\n if (isDisabled(this)) {\n return;\n }\n EventHandler.one(target, EVENT_HIDDEN$3, () => {\n // focus on trigger when it is closed\n if (isVisible(this)) {\n this.focus();\n }\n });\n\n // avoid conflict when clicking a toggler of an offcanvas, while another is open\n const alreadyOpen = SelectorEngine.findOne(OPEN_SELECTOR);\n if (alreadyOpen && alreadyOpen !== target) {\n Offcanvas.getInstance(alreadyOpen).hide();\n }\n const data = Offcanvas.getOrCreateInstance(target);\n data.toggle(this);\n});\nEventHandler.on(window, EVENT_LOAD_DATA_API$2, () => {\n for (const selector of SelectorEngine.find(OPEN_SELECTOR)) {\n Offcanvas.getOrCreateInstance(selector).show();\n }\n});\nEventHandler.on(window, EVENT_RESIZE, () => {\n for (const element of SelectorEngine.find('[aria-modal][class*=show][class*=offcanvas-]')) {\n if (getComputedStyle(element).position !== 'fixed') {\n Offcanvas.getOrCreateInstance(element).hide();\n }\n }\n});\nenableDismissTrigger(Offcanvas);\n\n/**\n * jQuery\n */\n\ndefineJQueryPlugin(Offcanvas);\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap util/sanitizer.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n\n// js-docs-start allow-list\nconst ARIA_ATTRIBUTE_PATTERN = /^aria-[\\w-]*$/i;\nconst DefaultAllowlist = {\n // Global attributes allowed on any supplied element below.\n '*': ['class', 'dir', 'id', 'lang', 'role', ARIA_ATTRIBUTE_PATTERN],\n a: ['target', 'href', 'title', 'rel'],\n area: [],\n b: [],\n br: [],\n col: [],\n code: [],\n dd: [],\n div: [],\n dl: [],\n dt: [],\n em: [],\n hr: [],\n h1: [],\n h2: [],\n h3: [],\n h4: [],\n h5: [],\n h6: [],\n i: [],\n img: ['src', 'srcset', 'alt', 'title', 'width', 'height'],\n li: [],\n ol: [],\n p: [],\n pre: [],\n s: [],\n small: [],\n span: [],\n sub: [],\n sup: [],\n strong: [],\n u: [],\n ul: []\n};\n// js-docs-end allow-list\n\nconst uriAttributes = new Set(['background', 'cite', 'href', 'itemtype', 'longdesc', 'poster', 'src', 'xlink:href']);\n\n/**\n * A pattern that recognizes URLs that are safe wrt. 
XSS in URL navigation\n * contexts.\n *\n * Shout-out to Angular https://github.com/angular/angular/blob/15.2.8/packages/core/src/sanitization/url_sanitizer.ts#L38\n */\n// eslint-disable-next-line unicorn/better-regex\nconst SAFE_URL_PATTERN = /^(?!javascript:)(?:[a-z0-9+.-]+:|[^&:/?#]*(?:[/?#]|$))/i;\nconst allowedAttribute = (attribute, allowedAttributeList) => {\n const attributeName = attribute.nodeName.toLowerCase();\n if (allowedAttributeList.includes(attributeName)) {\n if (uriAttributes.has(attributeName)) {\n return Boolean(SAFE_URL_PATTERN.test(attribute.nodeValue));\n }\n return true;\n }\n\n // Check if a regular expression validates the attribute.\n return allowedAttributeList.filter(attributeRegex => attributeRegex instanceof RegExp).some(regex => regex.test(attributeName));\n};\nfunction sanitizeHtml(unsafeHtml, allowList, sanitizeFunction) {\n if (!unsafeHtml.length) {\n return unsafeHtml;\n }\n if (sanitizeFunction && typeof sanitizeFunction === 'function') {\n return sanitizeFunction(unsafeHtml);\n }\n const domParser = new window.DOMParser();\n const createdDocument = domParser.parseFromString(unsafeHtml, 'text/html');\n const elements = [].concat(...createdDocument.body.querySelectorAll('*'));\n for (const element of elements) {\n const elementName = element.nodeName.toLowerCase();\n if (!Object.keys(allowList).includes(elementName)) {\n element.remove();\n continue;\n }\n const attributeList = [].concat(...element.attributes);\n const allowedAttributes = [].concat(allowList['*'] || [], allowList[elementName] || []);\n for (const attribute of attributeList) {\n if (!allowedAttribute(attribute, allowedAttributes)) {\n element.removeAttribute(attribute.nodeName);\n }\n }\n }\n return createdDocument.body.innerHTML;\n}\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap util/template-factory.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n\n\n/**\n * Constants\n */\n\nconst NAME$5 = 'TemplateFactory';\nconst Default$4 = {\n allowList: DefaultAllowlist,\n content: {},\n // { selector : text , selector2 : text2 , }\n extraClass: '',\n html: false,\n sanitize: true,\n sanitizeFn: null,\n template: '
<div></div>
'\n};\nconst DefaultType$4 = {\n allowList: 'object',\n content: 'object',\n extraClass: '(string|function)',\n html: 'boolean',\n sanitize: 'boolean',\n sanitizeFn: '(null|function)',\n template: 'string'\n};\nconst DefaultContentType = {\n entry: '(string|element|function|null)',\n selector: '(string|element)'\n};\n\n/**\n * Class definition\n */\n\nclass TemplateFactory extends Config {\n constructor(config) {\n super();\n this._config = this._getConfig(config);\n }\n\n // Getters\n static get Default() {\n return Default$4;\n }\n static get DefaultType() {\n return DefaultType$4;\n }\n static get NAME() {\n return NAME$5;\n }\n\n // Public\n getContent() {\n return Object.values(this._config.content).map(config => this._resolvePossibleFunction(config)).filter(Boolean);\n }\n hasContent() {\n return this.getContent().length > 0;\n }\n changeContent(content) {\n this._checkContent(content);\n this._config.content = {\n ...this._config.content,\n ...content\n };\n return this;\n }\n toHtml() {\n const templateWrapper = document.createElement('div');\n templateWrapper.innerHTML = this._maybeSanitize(this._config.template);\n for (const [selector, text] of Object.entries(this._config.content)) {\n this._setContent(templateWrapper, text, selector);\n }\n const template = templateWrapper.children[0];\n const extraClass = this._resolvePossibleFunction(this._config.extraClass);\n if (extraClass) {\n template.classList.add(...extraClass.split(' '));\n }\n return template;\n }\n\n // Private\n _typeCheckConfig(config) {\n super._typeCheckConfig(config);\n this._checkContent(config.content);\n }\n _checkContent(arg) {\n for (const [selector, content] of Object.entries(arg)) {\n super._typeCheckConfig({\n selector,\n entry: content\n }, DefaultContentType);\n }\n }\n _setContent(template, content, selector) {\n const templateElement = SelectorEngine.findOne(selector, template);\n if (!templateElement) {\n return;\n }\n content = this._resolvePossibleFunction(content);\n if (!content) {\n templateElement.remove();\n return;\n }\n if (isElement(content)) {\n this._putElementInTemplate(getElement(content), templateElement);\n return;\n }\n if (this._config.html) {\n templateElement.innerHTML = this._maybeSanitize(content);\n return;\n }\n templateElement.textContent = content;\n }\n _maybeSanitize(arg) {\n return this._config.sanitize ? 
sanitizeHtml(arg, this._config.allowList, this._config.sanitizeFn) : arg;\n }\n _resolvePossibleFunction(arg) {\n return execute(arg, [this]);\n }\n _putElementInTemplate(element, templateElement) {\n if (this._config.html) {\n templateElement.innerHTML = '';\n templateElement.append(element);\n return;\n }\n templateElement.textContent = element.textContent;\n }\n}\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap tooltip.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n\n\n/**\n * Constants\n */\n\nconst NAME$4 = 'tooltip';\nconst DISALLOWED_ATTRIBUTES = new Set(['sanitize', 'allowList', 'sanitizeFn']);\nconst CLASS_NAME_FADE$2 = 'fade';\nconst CLASS_NAME_MODAL = 'modal';\nconst CLASS_NAME_SHOW$2 = 'show';\nconst SELECTOR_TOOLTIP_INNER = '.tooltip-inner';\nconst SELECTOR_MODAL = `.${CLASS_NAME_MODAL}`;\nconst EVENT_MODAL_HIDE = 'hide.bs.modal';\nconst TRIGGER_HOVER = 'hover';\nconst TRIGGER_FOCUS = 'focus';\nconst TRIGGER_CLICK = 'click';\nconst TRIGGER_MANUAL = 'manual';\nconst EVENT_HIDE$2 = 'hide';\nconst EVENT_HIDDEN$2 = 'hidden';\nconst EVENT_SHOW$2 = 'show';\nconst EVENT_SHOWN$2 = 'shown';\nconst EVENT_INSERTED = 'inserted';\nconst EVENT_CLICK$1 = 'click';\nconst EVENT_FOCUSIN$1 = 'focusin';\nconst EVENT_FOCUSOUT$1 = 'focusout';\nconst EVENT_MOUSEENTER = 'mouseenter';\nconst EVENT_MOUSELEAVE = 'mouseleave';\nconst AttachmentMap = {\n AUTO: 'auto',\n TOP: 'top',\n RIGHT: isRTL() ? 'left' : 'right',\n BOTTOM: 'bottom',\n LEFT: isRTL() ? 'right' : 'left'\n};\nconst Default$3 = {\n allowList: DefaultAllowlist,\n animation: true,\n boundary: 'clippingParents',\n container: false,\n customClass: '',\n delay: 0,\n fallbackPlacements: ['top', 'right', 'bottom', 'left'],\n html: false,\n offset: [0, 6],\n placement: 'top',\n popperConfig: null,\n sanitize: true,\n sanitizeFn: null,\n selector: false,\n template: '
<div class="tooltip" role="tooltip">' + '<div class="tooltip-arrow"></div>' + '<div class="tooltip-inner"></div>' + '</div>
',\n title: '',\n trigger: 'hover focus'\n};\nconst DefaultType$3 = {\n allowList: 'object',\n animation: 'boolean',\n boundary: '(string|element)',\n container: '(string|element|boolean)',\n customClass: '(string|function)',\n delay: '(number|object)',\n fallbackPlacements: 'array',\n html: 'boolean',\n offset: '(array|string|function)',\n placement: '(string|function)',\n popperConfig: '(null|object|function)',\n sanitize: 'boolean',\n sanitizeFn: '(null|function)',\n selector: '(string|boolean)',\n template: 'string',\n title: '(string|element|function)',\n trigger: 'string'\n};\n\n/**\n * Class definition\n */\n\nclass Tooltip extends BaseComponent {\n constructor(element, config) {\n if (typeof Popper === 'undefined') {\n throw new TypeError('Bootstrap\\'s tooltips require Popper (https://popper.js.org)');\n }\n super(element, config);\n\n // Private\n this._isEnabled = true;\n this._timeout = 0;\n this._isHovered = null;\n this._activeTrigger = {};\n this._popper = null;\n this._templateFactory = null;\n this._newContent = null;\n\n // Protected\n this.tip = null;\n this._setListeners();\n if (!this._config.selector) {\n this._fixTitle();\n }\n }\n\n // Getters\n static get Default() {\n return Default$3;\n }\n static get DefaultType() {\n return DefaultType$3;\n }\n static get NAME() {\n return NAME$4;\n }\n\n // Public\n enable() {\n this._isEnabled = true;\n }\n disable() {\n this._isEnabled = false;\n }\n toggleEnabled() {\n this._isEnabled = !this._isEnabled;\n }\n toggle() {\n if (!this._isEnabled) {\n return;\n }\n this._activeTrigger.click = !this._activeTrigger.click;\n if (this._isShown()) {\n this._leave();\n return;\n }\n this._enter();\n }\n dispose() {\n clearTimeout(this._timeout);\n EventHandler.off(this._element.closest(SELECTOR_MODAL), EVENT_MODAL_HIDE, this._hideModalHandler);\n if (this._element.getAttribute('data-bs-original-title')) {\n this._element.setAttribute('title', this._element.getAttribute('data-bs-original-title'));\n }\n this._disposePopper();\n super.dispose();\n }\n show() {\n if (this._element.style.display === 'none') {\n throw new Error('Please use show on visible elements');\n }\n if (!(this._isWithContent() && this._isEnabled)) {\n return;\n }\n const showEvent = EventHandler.trigger(this._element, this.constructor.eventName(EVENT_SHOW$2));\n const shadowRoot = findShadowRoot(this._element);\n const isInTheDom = (shadowRoot || this._element.ownerDocument.documentElement).contains(this._element);\n if (showEvent.defaultPrevented || !isInTheDom) {\n return;\n }\n\n // TODO: v6 remove this or make it optional\n this._disposePopper();\n const tip = this._getTipElement();\n this._element.setAttribute('aria-describedby', tip.getAttribute('id'));\n const {\n container\n } = this._config;\n if (!this._element.ownerDocument.documentElement.contains(this.tip)) {\n container.append(tip);\n EventHandler.trigger(this._element, this.constructor.eventName(EVENT_INSERTED));\n }\n this._popper = this._createPopper(tip);\n tip.classList.add(CLASS_NAME_SHOW$2);\n\n // If this is a touch-enabled device we add extra\n // empty mouseover listeners to the body's immediate children;\n // only needed because of broken event delegation on iOS\n // https://www.quirksmode.org/blog/archives/2014/02/mouse_event_bub.html\n if ('ontouchstart' in document.documentElement) {\n for (const element of [].concat(...document.body.children)) {\n EventHandler.on(element, 'mouseover', noop);\n }\n }\n const complete = () => {\n EventHandler.trigger(this._element, 
this.constructor.eventName(EVENT_SHOWN$2));\n if (this._isHovered === false) {\n this._leave();\n }\n this._isHovered = false;\n };\n this._queueCallback(complete, this.tip, this._isAnimated());\n }\n hide() {\n if (!this._isShown()) {\n return;\n }\n const hideEvent = EventHandler.trigger(this._element, this.constructor.eventName(EVENT_HIDE$2));\n if (hideEvent.defaultPrevented) {\n return;\n }\n const tip = this._getTipElement();\n tip.classList.remove(CLASS_NAME_SHOW$2);\n\n // If this is a touch-enabled device we remove the extra\n // empty mouseover listeners we added for iOS support\n if ('ontouchstart' in document.documentElement) {\n for (const element of [].concat(...document.body.children)) {\n EventHandler.off(element, 'mouseover', noop);\n }\n }\n this._activeTrigger[TRIGGER_CLICK] = false;\n this._activeTrigger[TRIGGER_FOCUS] = false;\n this._activeTrigger[TRIGGER_HOVER] = false;\n this._isHovered = null; // it is a trick to support manual triggering\n\n const complete = () => {\n if (this._isWithActiveTrigger()) {\n return;\n }\n if (!this._isHovered) {\n this._disposePopper();\n }\n this._element.removeAttribute('aria-describedby');\n EventHandler.trigger(this._element, this.constructor.eventName(EVENT_HIDDEN$2));\n };\n this._queueCallback(complete, this.tip, this._isAnimated());\n }\n update() {\n if (this._popper) {\n this._popper.update();\n }\n }\n\n // Protected\n _isWithContent() {\n return Boolean(this._getTitle());\n }\n _getTipElement() {\n if (!this.tip) {\n this.tip = this._createTipElement(this._newContent || this._getContentForTemplate());\n }\n return this.tip;\n }\n _createTipElement(content) {\n const tip = this._getTemplateFactory(content).toHtml();\n\n // TODO: remove this check in v6\n if (!tip) {\n return null;\n }\n tip.classList.remove(CLASS_NAME_FADE$2, CLASS_NAME_SHOW$2);\n // TODO: v6 the following can be achieved with CSS only\n tip.classList.add(`bs-${this.constructor.NAME}-auto`);\n const tipId = getUID(this.constructor.NAME).toString();\n tip.setAttribute('id', tipId);\n if (this._isAnimated()) {\n tip.classList.add(CLASS_NAME_FADE$2);\n }\n return tip;\n }\n setContent(content) {\n this._newContent = content;\n if (this._isShown()) {\n this._disposePopper();\n this.show();\n }\n }\n _getTemplateFactory(content) {\n if (this._templateFactory) {\n this._templateFactory.changeContent(content);\n } else {\n this._templateFactory = new TemplateFactory({\n ...this._config,\n // the `content` var has to be after `this._config`\n // to override config.content in case of popover\n content,\n extraClass: this._resolvePossibleFunction(this._config.customClass)\n });\n }\n return this._templateFactory;\n }\n _getContentForTemplate() {\n return {\n [SELECTOR_TOOLTIP_INNER]: this._getTitle()\n };\n }\n _getTitle() {\n return this._resolvePossibleFunction(this._config.title) || this._element.getAttribute('data-bs-original-title');\n }\n\n // Private\n _initializeOnDelegatedTarget(event) {\n return this.constructor.getOrCreateInstance(event.delegateTarget, this._getDelegateConfig());\n }\n _isAnimated() {\n return this._config.animation || this.tip && this.tip.classList.contains(CLASS_NAME_FADE$2);\n }\n _isShown() {\n return this.tip && this.tip.classList.contains(CLASS_NAME_SHOW$2);\n }\n _createPopper(tip) {\n const placement = execute(this._config.placement, [this, tip, this._element]);\n const attachment = AttachmentMap[placement.toUpperCase()];\n return Popper.createPopper(this._element, tip, this._getPopperConfig(attachment));\n }\n _getOffset() {\n 
const {\n offset\n } = this._config;\n if (typeof offset === 'string') {\n return offset.split(',').map(value => Number.parseInt(value, 10));\n }\n if (typeof offset === 'function') {\n return popperData => offset(popperData, this._element);\n }\n return offset;\n }\n _resolvePossibleFunction(arg) {\n return execute(arg, [this._element]);\n }\n _getPopperConfig(attachment) {\n const defaultBsPopperConfig = {\n placement: attachment,\n modifiers: [{\n name: 'flip',\n options: {\n fallbackPlacements: this._config.fallbackPlacements\n }\n }, {\n name: 'offset',\n options: {\n offset: this._getOffset()\n }\n }, {\n name: 'preventOverflow',\n options: {\n boundary: this._config.boundary\n }\n }, {\n name: 'arrow',\n options: {\n element: `.${this.constructor.NAME}-arrow`\n }\n }, {\n name: 'preSetPlacement',\n enabled: true,\n phase: 'beforeMain',\n fn: data => {\n // Pre-set Popper's placement attribute in order to read the arrow sizes properly.\n // Otherwise, Popper mixes up the width and height dimensions since the initial arrow style is for top placement\n this._getTipElement().setAttribute('data-popper-placement', data.state.placement);\n }\n }]\n };\n return {\n ...defaultBsPopperConfig,\n ...execute(this._config.popperConfig, [defaultBsPopperConfig])\n };\n }\n _setListeners() {\n const triggers = this._config.trigger.split(' ');\n for (const trigger of triggers) {\n if (trigger === 'click') {\n EventHandler.on(this._element, this.constructor.eventName(EVENT_CLICK$1), this._config.selector, event => {\n const context = this._initializeOnDelegatedTarget(event);\n context.toggle();\n });\n } else if (trigger !== TRIGGER_MANUAL) {\n const eventIn = trigger === TRIGGER_HOVER ? this.constructor.eventName(EVENT_MOUSEENTER) : this.constructor.eventName(EVENT_FOCUSIN$1);\n const eventOut = trigger === TRIGGER_HOVER ? this.constructor.eventName(EVENT_MOUSELEAVE) : this.constructor.eventName(EVENT_FOCUSOUT$1);\n EventHandler.on(this._element, eventIn, this._config.selector, event => {\n const context = this._initializeOnDelegatedTarget(event);\n context._activeTrigger[event.type === 'focusin' ? TRIGGER_FOCUS : TRIGGER_HOVER] = true;\n context._enter();\n });\n EventHandler.on(this._element, eventOut, this._config.selector, event => {\n const context = this._initializeOnDelegatedTarget(event);\n context._activeTrigger[event.type === 'focusout' ? TRIGGER_FOCUS : TRIGGER_HOVER] = context._element.contains(event.relatedTarget);\n context._leave();\n });\n }\n }\n this._hideModalHandler = () => {\n if (this._element) {\n this.hide();\n }\n };\n EventHandler.on(this._element.closest(SELECTOR_MODAL), EVENT_MODAL_HIDE, this._hideModalHandler);\n }\n _fixTitle() {\n const title = this._element.getAttribute('title');\n if (!title) {\n return;\n }\n if (!this._element.getAttribute('aria-label') && !this._element.textContent.trim()) {\n this._element.setAttribute('aria-label', title);\n }\n this._element.setAttribute('data-bs-original-title', title); // DO NOT USE IT. 
Is only for backwards compatibility\n this._element.removeAttribute('title');\n }\n _enter() {\n if (this._isShown() || this._isHovered) {\n this._isHovered = true;\n return;\n }\n this._isHovered = true;\n this._setTimeout(() => {\n if (this._isHovered) {\n this.show();\n }\n }, this._config.delay.show);\n }\n _leave() {\n if (this._isWithActiveTrigger()) {\n return;\n }\n this._isHovered = false;\n this._setTimeout(() => {\n if (!this._isHovered) {\n this.hide();\n }\n }, this._config.delay.hide);\n }\n _setTimeout(handler, timeout) {\n clearTimeout(this._timeout);\n this._timeout = setTimeout(handler, timeout);\n }\n _isWithActiveTrigger() {\n return Object.values(this._activeTrigger).includes(true);\n }\n _getConfig(config) {\n const dataAttributes = Manipulator.getDataAttributes(this._element);\n for (const dataAttribute of Object.keys(dataAttributes)) {\n if (DISALLOWED_ATTRIBUTES.has(dataAttribute)) {\n delete dataAttributes[dataAttribute];\n }\n }\n config = {\n ...dataAttributes,\n ...(typeof config === 'object' && config ? config : {})\n };\n config = this._mergeConfigObj(config);\n config = this._configAfterMerge(config);\n this._typeCheckConfig(config);\n return config;\n }\n _configAfterMerge(config) {\n config.container = config.container === false ? document.body : getElement(config.container);\n if (typeof config.delay === 'number') {\n config.delay = {\n show: config.delay,\n hide: config.delay\n };\n }\n if (typeof config.title === 'number') {\n config.title = config.title.toString();\n }\n if (typeof config.content === 'number') {\n config.content = config.content.toString();\n }\n return config;\n }\n _getDelegateConfig() {\n const config = {};\n for (const [key, value] of Object.entries(this._config)) {\n if (this.constructor.Default[key] !== value) {\n config[key] = value;\n }\n }\n config.selector = false;\n config.trigger = 'manual';\n\n // In the future can be replaced with:\n // const keysWithDifferentValues = Object.entries(this._config).filter(entry => this.constructor.Default[entry[0]] !== this._config[entry[0]])\n // `Object.fromEntries(keysWithDifferentValues)`\n return config;\n }\n _disposePopper() {\n if (this._popper) {\n this._popper.destroy();\n this._popper = null;\n }\n if (this.tip) {\n this.tip.remove();\n this.tip = null;\n }\n }\n\n // Static\n static jQueryInterface(config) {\n return this.each(function () {\n const data = Tooltip.getOrCreateInstance(this, config);\n if (typeof config !== 'string') {\n return;\n }\n if (typeof data[config] === 'undefined') {\n throw new TypeError(`No method named \"${config}\"`);\n }\n data[config]();\n });\n }\n}\n\n/**\n * jQuery\n */\n\ndefineJQueryPlugin(Tooltip);\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap popover.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n\n\n/**\n * Constants\n */\n\nconst NAME$3 = 'popover';\nconst SELECTOR_TITLE = '.popover-header';\nconst SELECTOR_CONTENT = '.popover-body';\nconst Default$2 = {\n ...Tooltip.Default,\n content: '',\n offset: [0, 8],\n placement: 'right',\n template: '
<div class="popover" role="tooltip">' + '<div class="popover-arrow"></div>' + '<h3 class="popover-header"></h3>' + '<div class="popover-body"></div>' + '</div>
',\n trigger: 'click'\n};\nconst DefaultType$2 = {\n ...Tooltip.DefaultType,\n content: '(null|string|element|function)'\n};\n\n/**\n * Class definition\n */\n\nclass Popover extends Tooltip {\n // Getters\n static get Default() {\n return Default$2;\n }\n static get DefaultType() {\n return DefaultType$2;\n }\n static get NAME() {\n return NAME$3;\n }\n\n // Overrides\n _isWithContent() {\n return this._getTitle() || this._getContent();\n }\n\n // Private\n _getContentForTemplate() {\n return {\n [SELECTOR_TITLE]: this._getTitle(),\n [SELECTOR_CONTENT]: this._getContent()\n };\n }\n _getContent() {\n return this._resolvePossibleFunction(this._config.content);\n }\n\n // Static\n static jQueryInterface(config) {\n return this.each(function () {\n const data = Popover.getOrCreateInstance(this, config);\n if (typeof config !== 'string') {\n return;\n }\n if (typeof data[config] === 'undefined') {\n throw new TypeError(`No method named \"${config}\"`);\n }\n data[config]();\n });\n }\n}\n\n/**\n * jQuery\n */\n\ndefineJQueryPlugin(Popover);\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap scrollspy.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n\n\n/**\n * Constants\n */\n\nconst NAME$2 = 'scrollspy';\nconst DATA_KEY$2 = 'bs.scrollspy';\nconst EVENT_KEY$2 = `.${DATA_KEY$2}`;\nconst DATA_API_KEY = '.data-api';\nconst EVENT_ACTIVATE = `activate${EVENT_KEY$2}`;\nconst EVENT_CLICK = `click${EVENT_KEY$2}`;\nconst EVENT_LOAD_DATA_API$1 = `load${EVENT_KEY$2}${DATA_API_KEY}`;\nconst CLASS_NAME_DROPDOWN_ITEM = 'dropdown-item';\nconst CLASS_NAME_ACTIVE$1 = 'active';\nconst SELECTOR_DATA_SPY = '[data-bs-spy=\"scroll\"]';\nconst SELECTOR_TARGET_LINKS = '[href]';\nconst SELECTOR_NAV_LIST_GROUP = '.nav, .list-group';\nconst SELECTOR_NAV_LINKS = '.nav-link';\nconst SELECTOR_NAV_ITEMS = '.nav-item';\nconst SELECTOR_LIST_ITEMS = '.list-group-item';\nconst SELECTOR_LINK_ITEMS = `${SELECTOR_NAV_LINKS}, ${SELECTOR_NAV_ITEMS} > ${SELECTOR_NAV_LINKS}, ${SELECTOR_LIST_ITEMS}`;\nconst SELECTOR_DROPDOWN = '.dropdown';\nconst SELECTOR_DROPDOWN_TOGGLE$1 = '.dropdown-toggle';\nconst Default$1 = {\n offset: null,\n // TODO: v6 @deprecated, keep it for backwards compatibility reasons\n rootMargin: '0px 0px -25%',\n smoothScroll: false,\n target: null,\n threshold: [0.1, 0.5, 1]\n};\nconst DefaultType$1 = {\n offset: '(number|null)',\n // TODO v6 @deprecated, keep it for backwards compatibility reasons\n rootMargin: 'string',\n smoothScroll: 'boolean',\n target: 'element',\n threshold: 'array'\n};\n\n/**\n * Class definition\n */\n\nclass ScrollSpy extends BaseComponent {\n constructor(element, config) {\n super(element, config);\n\n // this._element is the observablesContainer and config.target the menu links wrapper\n this._targetLinks = new Map();\n this._observableSections = new Map();\n this._rootElement = getComputedStyle(this._element).overflowY === 'visible' ? 
null : this._element;\n this._activeTarget = null;\n this._observer = null;\n this._previousScrollData = {\n visibleEntryTop: 0,\n parentScrollTop: 0\n };\n this.refresh(); // initialize\n }\n\n // Getters\n static get Default() {\n return Default$1;\n }\n static get DefaultType() {\n return DefaultType$1;\n }\n static get NAME() {\n return NAME$2;\n }\n\n // Public\n refresh() {\n this._initializeTargetsAndObservables();\n this._maybeEnableSmoothScroll();\n if (this._observer) {\n this._observer.disconnect();\n } else {\n this._observer = this._getNewObserver();\n }\n for (const section of this._observableSections.values()) {\n this._observer.observe(section);\n }\n }\n dispose() {\n this._observer.disconnect();\n super.dispose();\n }\n\n // Private\n _configAfterMerge(config) {\n // TODO: on v6 target should be given explicitly & remove the {target: 'ss-target'} case\n config.target = getElement(config.target) || document.body;\n\n // TODO: v6 Only for backwards compatibility reasons. Use rootMargin only\n config.rootMargin = config.offset ? `${config.offset}px 0px -30%` : config.rootMargin;\n if (typeof config.threshold === 'string') {\n config.threshold = config.threshold.split(',').map(value => Number.parseFloat(value));\n }\n return config;\n }\n _maybeEnableSmoothScroll() {\n if (!this._config.smoothScroll) {\n return;\n }\n\n // unregister any previous listeners\n EventHandler.off(this._config.target, EVENT_CLICK);\n EventHandler.on(this._config.target, EVENT_CLICK, SELECTOR_TARGET_LINKS, event => {\n const observableSection = this._observableSections.get(event.target.hash);\n if (observableSection) {\n event.preventDefault();\n const root = this._rootElement || window;\n const height = observableSection.offsetTop - this._element.offsetTop;\n if (root.scrollTo) {\n root.scrollTo({\n top: height,\n behavior: 'smooth'\n });\n return;\n }\n\n // Chrome 60 doesn't support `scrollTo`\n root.scrollTop = height;\n }\n });\n }\n _getNewObserver() {\n const options = {\n root: this._rootElement,\n threshold: this._config.threshold,\n rootMargin: this._config.rootMargin\n };\n return new IntersectionObserver(entries => this._observerCallback(entries), options);\n }\n\n // The logic of selection\n _observerCallback(entries) {\n const targetElement = entry => this._targetLinks.get(`#${entry.target.id}`);\n const activate = entry => {\n this._previousScrollData.visibleEntryTop = entry.target.offsetTop;\n this._process(targetElement(entry));\n };\n const parentScrollTop = (this._rootElement || document.documentElement).scrollTop;\n const userScrollsDown = parentScrollTop >= this._previousScrollData.parentScrollTop;\n this._previousScrollData.parentScrollTop = parentScrollTop;\n for (const entry of entries) {\n if (!entry.isIntersecting) {\n this._activeTarget = null;\n this._clearActiveClass(targetElement(entry));\n continue;\n }\n const entryIsLowerThanPrevious = entry.target.offsetTop >= this._previousScrollData.visibleEntryTop;\n // if we are scrolling down, pick the bigger offsetTop\n if (userScrollsDown && entryIsLowerThanPrevious) {\n activate(entry);\n // if parent isn't scrolled, let's keep the first visible item, breaking the iteration\n if (!parentScrollTop) {\n return;\n }\n continue;\n }\n\n // if we are scrolling up, pick the smallest offsetTop\n if (!userScrollsDown && !entryIsLowerThanPrevious) {\n activate(entry);\n }\n }\n }\n _initializeTargetsAndObservables() {\n this._targetLinks = new Map();\n this._observableSections = new Map();\n const targetLinks = 
SelectorEngine.find(SELECTOR_TARGET_LINKS, this._config.target);\n for (const anchor of targetLinks) {\n // ensure that the anchor has an id and is not disabled\n if (!anchor.hash || isDisabled(anchor)) {\n continue;\n }\n const observableSection = SelectorEngine.findOne(decodeURI(anchor.hash), this._element);\n\n // ensure that the observableSection exists & is visible\n if (isVisible(observableSection)) {\n this._targetLinks.set(decodeURI(anchor.hash), anchor);\n this._observableSections.set(anchor.hash, observableSection);\n }\n }\n }\n _process(target) {\n if (this._activeTarget === target) {\n return;\n }\n this._clearActiveClass(this._config.target);\n this._activeTarget = target;\n target.classList.add(CLASS_NAME_ACTIVE$1);\n this._activateParents(target);\n EventHandler.trigger(this._element, EVENT_ACTIVATE, {\n relatedTarget: target\n });\n }\n _activateParents(target) {\n // Activate dropdown parents\n if (target.classList.contains(CLASS_NAME_DROPDOWN_ITEM)) {\n SelectorEngine.findOne(SELECTOR_DROPDOWN_TOGGLE$1, target.closest(SELECTOR_DROPDOWN)).classList.add(CLASS_NAME_ACTIVE$1);\n return;\n }\n for (const listGroup of SelectorEngine.parents(target, SELECTOR_NAV_LIST_GROUP)) {\n // Set triggered links parents as active\n // With both