diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..c7956cb --- /dev/null +++ b/.gitignore @@ -0,0 +1,11 @@ + +.env +.terraform +.terraform.lock.hcl +./examples/*/config-k8s/kubeconfig +./examples/*/terraform.* + + +.idea +*.iml +out \ No newline at end of file diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..29ebfa5 --- /dev/null +++ b/LICENSE @@ -0,0 +1,661 @@ + GNU AFFERO GENERAL PUBLIC LICENSE + Version 3, 19 November 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU Affero General Public License is a free, copyleft license for +software and other kinds of works, specifically designed to ensure +cooperation with the community in the case of network server software. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +our General Public Licenses are intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + Developers that use our General Public Licenses protect your rights +with two steps: (1) assert copyright on the software, and (2) offer +you this License which gives you legal permission to copy, distribute +and/or modify the software. + + A secondary benefit of defending all users' freedom is that +improvements made in alternate versions of the program, if they +receive widespread use, become available for other developers to +incorporate. Many developers of free software are heartened and +encouraged by the resulting cooperation. However, in the case of +software used on network servers, this result may fail to come about. +The GNU General Public License permits making a modified version and +letting the public access it on a server without ever releasing its +source code to the public. + + The GNU Affero General Public License is designed specifically to +ensure that, in such cases, the modified source code becomes available +to the community. It requires the operator of a network server to +provide the source code of the modified version running there to the +users of that server. Therefore, public use of a modified version, on +a publicly accessible server, gives the public access to the source +code of the modified version. + + An older license, called the Affero General Public License and +published by Affero, was designed to accomplish similar goals. This is +a different license, not a version of the Affero GPL, but Affero has +released a new version of the Affero GPL which permits relicensing under +this license. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU Affero General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. 
+ + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. 
For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. 
+ + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. 
You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. 
+ + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. 
+ + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. 
+ + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Remote Network Interaction; Use with the GNU General Public License. + + Notwithstanding any other provision of this License, if you modify the +Program, your modified version must prominently offer all users +interacting with it remotely through a computer network (if your version +supports such interaction) an opportunity to receive the Corresponding +Source of your version by providing access to the Corresponding Source +from a network server at no charge, through some standard or customary +means of facilitating copying of software. This Corresponding Source +shall include the Corresponding Source for any work covered by version 3 +of the GNU General Public License that is incorporated pursuant to the +following paragraph. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the work with which it is combined will remain governed by version +3 of the GNU General Public License. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU Affero General Public License from time to time. Such new versions +will be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU Affero General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. 
If the Program does not specify a version number of the +GNU Affero General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU Affero General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (C) + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU Affero General Public License as published + by the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Affero General Public License for more details. + + You should have received a copy of the GNU Affero General Public License + along with this program. If not, see . 
+
+Also add information on how to contact you by electronic and paper mail.
+
+  If your software can interact with users remotely through a computer
+network, you should also make sure that it provides a way for users to
+get its source. For example, if your program is a web application, its
+interface could display a "Source" link that leads users to an archive
+of the code. There are many ways you could offer source, and different
+solutions will be better for different programs; see section 13 for the
+specific requirements.
+
+  You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU AGPL, see
+<https://www.gnu.org/licenses/>.
\ No newline at end of file
diff --git a/README.md b/README.md
index 9b45035..d3bbca8 100644
--- a/README.md
+++ b/README.md
@@ -1,2 +1,98 @@
-# terraform-apitable-enterprise
-A Terraform module to create apitable enterprise
+# Apitable Enterprise modules
+
+## Introduction
+`APITable` Terraform Modules is a set of Terraform modules for managing and deploying APITable resources. These modules are designed to simplify creating and configuring APITable resources, provide reusable code blocks, and help you quickly set up and manage your APITable environment.
+
+
+## Module List
+The following APITable Terraform modules are available:
+
+* Apitable-app:
+  * `web-server`: builds the supercharged, SEO-friendly static website and web application, using Next.js.
+  * `backend-server`: handles HTTP requests about nodes, users, organizations, etc. Written in Java on the Spring Boot framework.
+  * `room-server`: handles operations (OT JSON) on datasheets, communicates with the socket server through gRPC, and also provides APIs for developers. Written in TypeScript on the NestJS framework.
+    * `nest-server`: handles HTTP GET requests about datasheets, records, views, etc.
+    * `socket-server`: establishes a long-lived connection with clients over the WebSocket protocol, enabling two-way communication for real-time collaboration, notifications, and other features.
+  * `openresty`: a popular open source gateway.
+
+* Apitable-datacenter:
+  * `MySQL`: a popular relational database management system, commonly used as the back-end data store of web applications.
+  * `Redis`: a fast in-memory data structure store, commonly used for caching and message queues.
+  * `RabbitMQ`: open source message queuing software used for asynchronous communication between applications.
+  * `Minio`: an open source object storage server that can be used to store and manage large amounts of data.
+
+* Regcred: creates the Docker registry credential (image pull secret) used to authenticate when pulling the Docker images.
+
+* Charts: offline Helm charts of the stable version, including core modules such as minio, mysql, rabbitmq and redis.
+
+
+## Requirements
+1. A `kubeconfig` credential with at least the following permissions:
+```
+ - configmap:*
+ - pv:*
+ - pvc:*
+ - service:*
+ - deployment:*
+ - ns: create, get, list, watch
+ - events: create, get, list, delete, watch
+ - secret: *
+```
+
+2. The cluster has the kubernetes-csi driver installed and a `storageclass` available.
+3. Providers for Kubernetes and Helm, configured with the values used in the examples (a provider sketch follows this list).
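+
+As a rough sketch (the kubeconfig path below is only an illustration; substitute the path to your own credential, or reuse the provider blocks shipped with the examples), the two providers can be configured like this:
+
+```hcl
+provider "kubernetes" {
+  # Path to the kubeconfig credential described above (illustrative value).
+  config_path = "~/.kube/config"
+}
+
+provider "helm" {
+  kubernetes {
+    # The Helm provider reuses the same kubeconfig.
+    config_path = "~/.kube/config"
+  }
+}
+```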
+
+
+## Usage
+To use the APITable Terraform Modules, follow these steps:
+
+1. Create a new directory in your Terraform project to store your APITable-related configuration files.
+2. Create a `main.tf` file in that directory and reference the desired modules.
+3. Configure the input variables for each module to meet your needs.
+4. Run the `terraform init` command to initialize your Terraform environment.
+5. Run the `terraform apply` command to create and configure your APITable resources.
+
+
+## Example
+
+Here's an example of using the APITable Terraform Modules to create an APITable application and database:
+
+```hcl
+module "apitable-enterprise" {
+  source  = "apitable/enterprise/apitable"
+  version = "1.0.0"
+
+  namespace = "apitable-enterprise"
+
+  // Environment variables, such as databases, storage, etc.
+  env = {
+    "PARAM1" = "xx1"
+    "PARAM2" = "xx2"
+  }
+
+  default_storage_class_name = ""
+
+  regcred = {
+    registry_server = ""
+    username        = ""
+    password        = ""
+  }
+
+  # Optional: refer to https://apitable.getoutline.com/s/82e078fc-1a8d-4616-b69d-fcdbb18ef715/doc/image-version-inventory-J9TZe7ym8I to replace the image version
+  image_tags = {
+    backend_server = "latest-alpha"
+    room_server    = "latest-alpha"
+    web_server     = "latest-alpha"
+
+    # And others ...
+  }
+}
+```
+
+## FAQ
+### Q1: How do I get the regcred information?
+
+A: Submit the [form](https://vika.cn/share/shrdm5smaJ9XsxSzdTJVw/fom8jGSU4RqAw3vXNs) to request a trial.
+
diff --git a/charts/README.md b/charts/README.md
new file mode 100644
index 0000000..0a32a8a
--- /dev/null
+++ b/charts/README.md
@@ -0,0 +1,4 @@
+
+# Helm Charts installation packages
+
+
diff --git a/charts/init-data/.helmignore b/charts/init-data/.helmignore
new file mode 100644
index 0000000..0e8a0eb
--- /dev/null
+++ b/charts/init-data/.helmignore
@@ -0,0 +1,23 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*.orig
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/charts/init-data/Chart.yaml b/charts/init-data/Chart.yaml
new file mode 100644
index 0000000..2d14cf1
--- /dev/null
+++ b/charts/init-data/Chart.yaml
@@ -0,0 +1,6 @@
+apiVersion: v2
+name: vika-init-data
+description: vika-init-data
+type: application
+version: 0.1.29
+appVersion: "0.15.0"
diff --git a/charts/init-data/templates/NOTES.txt b/charts/init-data/templates/NOTES.txt
new file mode 100644
index 0000000..e69de29
diff --git a/charts/init-data/templates/_helpers.tpl b/charts/init-data/templates/_helpers.tpl
new file mode 100644
index 0000000..e69de29
diff --git a/charts/init-data/templates/_tplvalues.tpl b/charts/init-data/templates/_tplvalues.tpl
new file mode 100644
index 0000000..2db1668
--- /dev/null
+++ b/charts/init-data/templates/_tplvalues.tpl
@@ -0,0 +1,13 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Renders a value that contains template.
+Usage: +{{ include "common.tplvalues.render" ( dict "value" .Values.path.to.the.Value "context" $) }} +*/}} +{{- define "common.tplvalues.render" -}} + {{- if typeIs "string" .value }} + {{- tpl .value .context }} + {{- else }} + {{- tpl (.value | toYaml) .context }} + {{- end }} +{{- end -}} diff --git a/charts/init-data/templates/init-data/_helpers.tpl b/charts/init-data/templates/init-data/_helpers.tpl new file mode 100644 index 0000000..156d600 --- /dev/null +++ b/charts/init-data/templates/init-data/_helpers.tpl @@ -0,0 +1,79 @@ +{{/* +Set redis host +*/}} +{{- define "redis.host" -}} +{{- if .Values.redis.enabled -}} +{{- .Values.redis.host }} +{{- end -}} +{{- end -}} + +{{/* +Set redis port +*/}} +{{- define "redis.port" -}} +{{- if .Values.redis.enabled -}} +{{- .Values.redis.port }} +{{- end -}} +{{- end -}} + +{{/* +Set mysql host +*/}} +{{- define "mysql.host" -}} +{{- if .Values.mysql.enabled -}} +{{- .Values.mysql.host }} +{{- end -}} +{{- end -}} + +{{/* +Set mysql port +*/}} +{{- define "mysql.port" -}} +{{- if .Values.mysql.enabled -}} +{{- .Values.mysql.port }} +{{- end -}} +{{- end -}} + +{{/* +Set mongo host +*/}} +{{- define "mongo.host" -}} +{{- if .Values.mongo.enabled -}} +{{- .Values.mongo.host }} +{{- end -}} +{{- end -}} + +{{/* +Set mongo port +*/}} +{{- define "mongo.port" -}} +{{- if .Values.mongo.enabled -}} +{{- .Values.mongo.port }} +{{- end -}} +{{- end -}} + +{{/* +Set minio host +*/}} +{{- define "minio.host" -}} +{{- if .Values.minio.enabled -}} +{{- .Values.minio.host }} +{{- end -}} +{{- end -}} + +{{/* +Set minio port +*/}} +{{- define "minio.port" -}} +{{- if .Values.minio.enabled -}} +{{- .Values.minio.port }} +{{- end -}} +{{- end -}} + + +{{- define "imagePullSecret" }} +{{- with .Values.global.imageCredentials }} +{{- printf "{\"auths\":{\"%s\":{\"username\":\"%s\",\"password\":\"%s\",\"email\":\"%s\",\"auth\":\"%s\"}}}" .registry .username .password .email (printf "%s:%s" .username .password | b64enc) | b64enc }} +{{- end }} +{{- end }} + diff --git a/charts/init-data/templates/init-data/appdata-configmap.yaml b/charts/init-data/templates/init-data/appdata-configmap.yaml new file mode 100644 index 0000000..253a7ff --- /dev/null +++ b/charts/init-data/templates/init-data/appdata-configmap.yaml @@ -0,0 +1,17 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: init-appdata-env + namespace: {{.Release.Namespace}} + annotations: + # This is what defines this resource as a hook. Without this line, the + # job is considered part of the release. + "helm.sh/hook": "post-install,post-upgrade" + "helm.sh/hook-delete-policy": "{{ if .Values.hooks.removeOnSuccess }}hook-succeeded,{{ end }}before-hook-creation" + "helm.sh/hook-weight": "-5" +data: +{{- range $key, $val := .Values.initAppData }} + {{ $key }}: {{ $val | quote }} +{{- end}} + + diff --git a/charts/init-data/templates/init-data/init-appdata.job.yaml b/charts/init-data/templates/init-data/init-appdata.job.yaml new file mode 100644 index 0000000..332c6bd --- /dev/null +++ b/charts/init-data/templates/init-data/init-appdata.job.yaml @@ -0,0 +1,81 @@ +{{- if .Values.mysql.enabled -}} +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: init-appdata + namespace: {{.Release.Namespace}} + annotations: + # This is what defines this resource as a hook. Without this line, the + # job is considered part of the release. 
+ "helm.sh/hook": "post-install,post-upgrade" + "helm.sh/hook-delete-policy": "{{ if .Values.hooks.removeOnSuccess }}hook-succeeded,{{ end }}before-hook-creation" + "helm.sh/hook-weight": "30" +spec: + {{- if .Values.hooks.activeDeadlineSeconds }} + activeDeadlineSeconds: {{ .Values.hooks.activeDeadlineSeconds }} + {{- end}} + #completionMode: Indexed + ttlSecondsAfterFinished: 180 + template: + spec: + {{- if .Values.container.nodeSelector }} + nodeSelector: {{- include "common.tplvalues.render" (dict "value" .Values.container.nodeSelector "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.hooks.initCheck.tolerations }} + tolerations: + {{ toYaml .Values.hooks.initCheck.tolerations | indent 8 }} + {{- end }} + containers: + - name: init-appdata + image: {{ .Values.images.initAppData }} + imagePullPolicy: IfNotPresent + env: + - name: MYSQL_HOST + value: {{.Values.mysql.host}} + - name: MYSQL_PORT + value: "{{.Values.mysql.port}}" + - name: MYSQL_USERNAME + value: {{.Values.mysql.username}} + - name: MYSQL_PASSWORD + value: {{.Values.mysql.password}} + - name: MYSQL_DATABASE + value: {{.Values.mysql.database}} + - name: DATABASE_TABLE_PREFIX + value: {{.Values.mysql.tablePrefix}} + - name: AWS_ENDPOINT + value: "{{.Values.minio.schema}}://{{.Values.minio.host}}:{{.Values.minio.port}}" + - name: AWS_ACCESS_KEY + value: {{.Values.minio.accessKey}} + - name: AWS_ACCESS_SECRET + value: {{.Values.minio.secretKey}} + - name: AWS_REGION + value: {{.Values.minio.region}} + - name: OSS_BUCKET_NAME + value: {{.Values.minio.bucket}} + - name: ASSETS_BUCKET + value: {{.Values.minio.bucket}} + - name: OSS_ENABLED + value: "true" + - name: LOADER_TEMPLATE_SPACE_ID + value: "spcNTxlv8Drra" + - name: LOADER_WIDGET_SPACE_ID + value: "spcNTxlv8Drra" + - name: SKIP_WIDGET_CENTER_LOADER + value: "{{.Values.initAppData.skipWidgetCenterLoader}}" + - name: INIT_TEST_ACCOUNT_ENABLED + value: "{{.Values.initAppData.init_test_account_enabled}}" + envFrom: + - configMapRef: + name: init-appdata-env + optional: false + + resources: +{{ toYaml .Values.global.resources | indent 12 }} + {{- if .Values.global.imagePullSecrets }} + imagePullSecrets: +{{ toYaml .Values.global.imagePullSecrets | indent 8 }} + {{- end }} + restartPolicy: Never + backoffLimit: 10 +{{- end }} diff --git a/charts/init-data/templates/init-data/init-check.job.yaml b/charts/init-data/templates/init-data/init-check.job.yaml new file mode 100644 index 0000000..3898a1c --- /dev/null +++ b/charts/init-data/templates/init-data/init-check.job.yaml @@ -0,0 +1,162 @@ +{{- if .Values.hooks.enabled -}} +{{- $redisHost := include "redis.host" . -}} +{{- $redisPort := include "redis.port" . -}} +{{- $mysqlHost := include "mysql.host" . -}} +{{- $mysqlPort := include "mysql.port" . -}} +{{- $minioHost := include "minio.host" . -}} +{{- $minioPort := include "minio.port" . -}} +{{- $mongoHost := include "mongo.host" . -}} +{{- $mongoPort := include "mongo.port" . -}} +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ .Values.fullname }}-init-check + labels: + app: initCheck + chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" + annotations: + # This is what defines this resource as a hook. Without this line, the + # job is considered part of the release. 
+ "helm.sh/hook": "post-install,post-upgrade" + "helm.sh/hook-delete-policy": "before-hook-creation" + "helm.sh/hook-weight": "-1" +spec: + {{- if .Values.hooks.activeDeadlineSeconds }} + activeDeadlineSeconds: {{ .Values.hooks.activeDeadlineSeconds }} + {{- end}} + template: + metadata: + name: {{ .Values.fullname }}-init-check + annotations: + {{- if .Values.hooks.initCheck.podAnnotations }} + {{ toYaml .Values.hooks.initCheck.podAnnotations | indent 8 }} + {{- end }} + labels: + app: {{ .Values.fullname }}-init-check + release: "{{ .Release.Name }}" + {{- if .Values.hooks.initCheck.podLabels }} + {{ toYaml .Values.hooks.initCheck.podLabels | indent 8 }} + {{- end }} + spec: + {{- if .Values.hooks.initCheck.affinity }} + affinity: +{{ toYaml .Values.hooks.initCheck.affinity | indent 8 }} + {{- end }} + {{- if .Values.container.nodeSelector }} + nodeSelector: {{- include "common.tplvalues.render" (dict "value" .Values.container.nodeSelector "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.hooks.initCheck.tolerations }} + tolerations: +{{ toYaml .Values.hooks.initCheck.tolerations | indent 8 }} + {{- end }} + restartPolicy: Never + {{- if .Values.hooks.initCheck.image.imagePullSecrets }} + imagePullSecrets: +{{ toYaml .Values.hooks.initCheck.image.imagePullSecrets | indent 8 }} + {{- end }} + {{- if .Values.hooks.securityContext }} + securityContext: +{{ toYaml .Values.hooks.securityContext | indent 8 }} + {{- end }} + containers: + - name: db-check + image: {{.Values.images.initCheck }} + imagePullPolicy: {{ default "IfNotPresent" .Values.hooks.initCheck.image.pullPolicy }} + command: + - /bin/sh + - -c + - | + echo "Checking if REDIS is up" + REDIS_STATUS=0 + while [ $REDIS_STATUS -eq 0 ]; do + REDIS_STATUS=1 + {{- if .Values.redis.enabled }} + REDIS_HOST={{ $redisHost }} + {{- end }} + if ! nc -z "$REDIS_HOST" {{ $redisPort }}; then + REDIS_STATUS=0 + echo "$REDIS_HOST is not available yet" + fi + if [ "$REDIS_STATUS" -eq 0 ]; then + echo "REDIS not ready. Sleeping for 10s before trying again" + sleep 10; + fi + done + echo "REDIS is up" + + echo "Checking if mongo is up" + MONGO_STATUS=0 + while [ $MONGO_STATUS -eq 0 ]; do + MONGO_STATUS=1 + MONGO_SREPLICAS={{ .Values.mongo.enabled | ternary .Values.mongo.replicaCount 1 }} + i=0; while [ $i -lt $MONGO_REPLICAS ]; do + {{- if .Values.mongo.enabled }} + MONGO_HOST={{ $mongoHost }}-$i.{{ $mongoHost }}-headless + {{- else }} + MONGO_HOST={{ $mongoHost }} + {{- end }} + if ! nc -z "$MONGO_HOST" {{ $mongoPort }}; then + MONGO_STATUS=0 + echo "$MONGO_HOST is not available yet" + fi + i=$((i+1)) + done + if [ "$MONGO_STATUS" -eq 0 ]; then + echo "MONGO not ready. Sleeping for 10s before trying again" + sleep 10; + fi + done + echo "mongo is up" + + echo "Checking if mysql is up" + MYSQL_STATUS=0 + while [ $MYSQL_STATUS -eq 0 ]; do + MYSQL_STATUS=1 + {{- if .Values.mysql.enabled }} + mysql_HOST={{ $mysqlHost }} + if ! nc -z "$mysql_HOST" {{ $mysqlPort }}; then + MYSQL_STATUS=0 + echo "$mysql_HOST is not available yet" + fi + {{- else -}} + MYSQL_STATUS=0 + {{- end }} + if [ "$MYSQL_STATUS" -eq 0 ]; then + echo "mysql not ready. Sleeping for 10s before trying again" + sleep 10; + fi + done + echo "mysql is up" + + echo "Checking if minio is up" + MINIO_STATUS=0 + while [ $MINIO_STATUS -eq 0 ]; do + MINIO_STATUS=1 + {{- if .Values.minio.enabled }} + minio_HOST={{ $minioHost }} + if ! 
nc -z "$minio_HOST" {{ $minioPort }}; then + MINIO_STATUS=0 + echo "$minio_HOST is not available yet" + fi + {{- else -}} + MYSQL_STATUS=0 + {{- end }} + if [ "$MINIO_STATUS" -eq 0 ]; then + echo "minio not ready. Sleeping for 10s before trying again" + sleep 10; + fi + done + echo "minio is up" + + env: +{{- if .Values.hooks.initCheck.env }} +{{ toYaml .Values.hooks.initCheck.env | indent 8 }} +{{- end }} + resources: +{{ toYaml .Values.hooks.initCheck.resources | indent 10 }} + {{- if .Values.hooks.shareProcessNamespace }} + shareProcessNamespace: {{ .Values.hooks.shareProcessNamespace }} + {{- end }} +{{- end }} \ No newline at end of file diff --git a/charts/init-data/templates/init-data/init-data-db-enterprise.job.yaml b/charts/init-data/templates/init-data/init-data-db-enterprise.job.yaml new file mode 100644 index 0000000..a9d9b2c --- /dev/null +++ b/charts/init-data/templates/init-data/init-data-db-enterprise.job.yaml @@ -0,0 +1,55 @@ +{{- if .Values.mysql.enabled -}} +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: init-db-enterprise + namespace: {{.Release.Namespace}} + annotations: + # This is what defines this resource as a hook. Without this line, the + # job is considered part of the release. + "helm.sh/hook": "post-install" + "helm.sh/hook-delete-policy": "{{ if .Values.hooks.removeOnSuccess }}hook-succeeded,{{ end }}before-hook-creation" + "helm.sh/hook-weight": "2" +spec: + {{- if .Values.hooks.activeDeadlineSeconds }} + activeDeadlineSeconds: {{ .Values.hooks.activeDeadlineSeconds }} + {{- end}} +# completionMode: Indexed + ttlSecondsAfterFinished: 120 + template: + spec: + {{- if .Values.container.nodeSelector }} + nodeSelector: {{- include "common.tplvalues.render" (dict "value" .Values.container.nodeSelector "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.hooks.initCheck.tolerations }} + tolerations: + {{ toYaml .Values.hooks.initCheck.tolerations | indent 8 }} + {{- end }} + containers: + - name: init-db-enterprise + image: {{.Values.images.initDataDbEnterprise }} + env: + - name: DB_HOST + value: {{.Values.mysql.host}} + - name: DB_PORT + value: "{{.Values.mysql.port}}" + - name: DB_USERNAME + value: {{.Values.mysql.username}} + - name: DB_PASSWORD + value: {{.Values.mysql.password}} + - name: DB_NAME + value: {{.Values.mysql.database}} + - name: DATABASE_TABLE_PREFIX + value: {{.Values.mysql.tablePrefix}} + - name: EDITION + value: {{.Values.mysql.edition}} + - name: ACTION + value: 'update' + {{- if .Values.global.imagePullSecrets }} + imagePullSecrets: +{{ toYaml .Values.global.imagePullSecrets | indent 8 }} + {{- end }} + restartPolicy: Never + backoffLimit: 10 +{{- end }} \ No newline at end of file diff --git a/charts/init-data/templates/init-data/init-data-db.job.yaml b/charts/init-data/templates/init-data/init-data-db.job.yaml new file mode 100644 index 0000000..741e20a --- /dev/null +++ b/charts/init-data/templates/init-data/init-data-db.job.yaml @@ -0,0 +1,53 @@ +{{- if .Values.mysql.enabled -}} +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: init-db + namespace: {{.Release.Namespace}} + annotations: + # This is what defines this resource as a hook. Without this line, the + # job is considered part of the release. 
+ "helm.sh/hook": "post-install" + "helm.sh/hook-delete-policy": "{{ if .Values.hooks.removeOnSuccess }}hook-succeeded,{{ end }}before-hook-creation" + "helm.sh/hook-weight": "1" +spec: + {{- if .Values.hooks.activeDeadlineSeconds }} + activeDeadlineSeconds: {{ .Values.hooks.activeDeadlineSeconds }} + {{- end}} +# completionMode: Indexed + ttlSecondsAfterFinished: 120 + template: + spec: + {{- if .Values.container.nodeSelector }} + nodeSelector: {{- include "common.tplvalues.render" (dict "value" .Values.container.nodeSelector "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.hooks.initCheck.tolerations }} + tolerations: + {{ toYaml .Values.hooks.initCheck.tolerations | indent 8 }} + {{- end }} + containers: + - name: init-db + image: {{.Values.images.initDataDb }} + env: + - name: DB_HOST + value: {{.Values.mysql.host}} + - name: DB_PORT + value: "{{.Values.mysql.port}}" + - name: DB_USERNAME + value: {{.Values.mysql.username}} + - name: DB_PASSWORD + value: {{.Values.mysql.password}} + - name: DB_NAME + value: {{.Values.mysql.database}} + - name: DATABASE_TABLE_PREFIX + value: {{.Values.mysql.tablePrefix}} + - name: ACTION + value: 'update' + {{- if .Values.global.imagePullSecrets }} + imagePullSecrets: +{{ toYaml .Values.global.imagePullSecrets | indent 8 }} + {{- end }} + restartPolicy: Never + backoffLimit: 10 +{{- end }} \ No newline at end of file diff --git a/charts/init-data/templates/tests/.gitkeep b/charts/init-data/templates/tests/.gitkeep new file mode 100644 index 0000000..e69de29 diff --git a/charts/init-data/values.yaml b/charts/init-data/values.yaml new file mode 100644 index 0000000..1f86db2 --- /dev/null +++ b/charts/init-data/values.yaml @@ -0,0 +1,75 @@ +enabled: true +fullname: vika +global: + imagePullSecrets: [] + resources: + limits: + cpu: 500m + memory: 4096Mi + requests: + cpu: 200m + memory: 2048Mi +container: + nodeSelector: {} +hooks: + enabled: true + removeOnSuccess: false + activeDeadlineSeconds: 1800 + shareProcessNamespace: false + initCheck: + image: + # repository: subfuzion/netcat + # tag: latest + # pullPolicy: IfNotPresent + imagePullSecrets: [] + env: [ ] + # podLabels: [] + podAnnotations: { } + resources: + limits: + memory: 64Mi + requests: + cpu: 100m + memory: 64Mi + affinity: {} + securityContext: {} + # tolerations: [] +mysql: + enabled: true + host: "mysql-primary.vika-datacenter" + namespace: vika-datacenter + port: 3306 + username: root + password: "vikadata@com" + database: vikadata + tablePrefix: vika_ + edition: vika-saas +minio: + enabled: true + host: minio + port: 9000 + accessKey: vika + secretKey: "vikadata@com" + bucket: vk-assets-ltd + schema: http + region: us-east-1 +mongo: + enabled: false + host: mongo + port: 27017 + replicaCount: 1 +redis: + enabled: true + host: "redis-master" + port: 6379 +images: + initDataDb: docker.vika.ltd/vikadata/vika/init-db-enterprise:v0.16.0-alpha_build402 + initDataDbEnterprise: docker.vika.ltd/vikadata/vika/init-db-enterprise:v0.16.0-alpha_build402 + initAppData: docker.vika.ltd/vikadata/apitable-ce/init-appdata:v0.16.0-alpha_build25 + initCheck: subfuzion/netcat:latest +initAppData: + skipWidgetCenterLoader: "false" + init_test_account_enabled: "false" + LOADER_TEMPLATE_SPACE_ID: "spcNTxlv8Drra" + LOADER_WIDGET_SPACE_ID: "spcNTxlv8Drra" + diff --git a/charts/modules/minio/.helmignore b/charts/modules/minio/.helmignore new file mode 100644 index 0000000..f0c1319 --- /dev/null +++ b/charts/modules/minio/.helmignore @@ -0,0 +1,21 @@ +# Patterns to ignore when building packages. 
+# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/charts/modules/minio/Chart.lock b/charts/modules/minio/Chart.lock new file mode 100644 index 0000000..7d0b5e4 --- /dev/null +++ b/charts/modules/minio/Chart.lock @@ -0,0 +1,6 @@ +dependencies: +- name: common + repository: https://charts.bitnami.com/bitnami + version: 2.2.1 +digest: sha256:6c67cfa9945bf608209d4e2ca8f17079fca4770907c7902d984187ab5b21811e +generated: "2022-11-27T07:38:13.612960957Z" diff --git a/charts/modules/minio/Chart.yaml b/charts/modules/minio/Chart.yaml new file mode 100644 index 0000000..800fd7d --- /dev/null +++ b/charts/modules/minio/Chart.yaml @@ -0,0 +1,29 @@ +annotations: + category: Infrastructure +apiVersion: v2 +appVersion: 2022.11.29 +dependencies: +- name: common + repository: https://charts.bitnami.com/bitnami + tags: + - bitnami-common + version: 2.x.x +description: MinIO(R) is an object storage server, compatible with Amazon S3 cloud + storage service, mainly used for storing unstructured data (such as photos, videos, + log files, etc.). +home: https://github.com/bitnami/charts/tree/main/bitnami/minio +icon: https://bitnami.com/assets/stacks/minio/img/minio-stack-220x234.png +keywords: +- minio +- storage +- object-storage +- s3 +- cluster +maintainers: +- name: Bitnami + url: https://github.com/bitnami/charts +name: minio +sources: +- https://github.com/bitnami/containers/tree/main/bitnami/minio +- https://min.io +version: 11.10.20 diff --git a/charts/modules/minio/README.md b/charts/modules/minio/README.md new file mode 100644 index 0000000..1b3188d --- /dev/null +++ b/charts/modules/minio/README.md @@ -0,0 +1,592 @@ + + +# Bitnami Object Storage based on MinIO(R) + +MinIO(R) is an object storage server, compatible with Amazon S3 cloud storage service, mainly used for storing unstructured data (such as photos, videos, log files, etc.). + +[Overview of Bitnami Object Storage based on MinIO®](https://min.io/) + +Disclaimer: All software products, projects and company names are trademark(TM) or registered(R) trademarks of their respective holders, and use of them does not imply any affiliation or endorsement. This software is licensed to you subject to one or more open source licenses and VMware provides the software on an AS-IS basis. MinIO(R) is a registered trademark of the MinIO Inc. in the US and other countries. Bitnami is not affiliated, associated, authorized, endorsed by, or in any way officially connected with MinIO Inc. MinIO(R) is licensed under GNU AGPL v3.0. + +## TL;DR + +```console +$ helm repo add my-repo https://charts.bitnami.com/bitnami +$ helm install my-release my-repo/minio +``` + +## Introduction + +This chart bootstraps a [MinIO®](https://github.com/bitnami/containers/tree/main/bitnami/minio) deployment on a [Kubernetes](https://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +Bitnami charts can be used with [Kubeapps](https://kubeapps.dev/) for deployment and management of Helm Charts in clusters. 
+ +## Prerequisites + +- Kubernetes 1.19+ +- Helm 3.2.0+ +- PV provisioner support in the underlying infrastructure +- ReadWriteMany volume for Gateway NAS deployment scaling + +## Installing the Chart + +To install the chart with the release name `my-release`: + +```console +$ helm repo add my-repo https://charts.bitnami.com/bitnami +$ helm install my-release my-repo/minio +``` + +These commands deploy MinIO® on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation. + +> **Tip**: List all releases using `helm list` + +## Uninstalling the Chart + +To uninstall/delete the `my-release` deployment: + +```console +$ helm delete my-release +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. + +## Parameters + +### Global parameters + +| Name | Description | Value | +| ------------------------- | ----------------------------------------------- | ----- | +| `global.imageRegistry` | Global Docker image registry | `""` | +| `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]` | +| `global.storageClass` | Global StorageClass for Persistent Volume(s) | `""` | + + +### Common parameters + +| Name | Description | Value | +| ------------------- | -------------------------------------------------------------------------------------------- | --------------- | +| `nameOverride` | String to partially override common.names.fullname template (will maintain the release name) | `""` | +| `fullnameOverride` | String to fully override common.names.fullname template | `""` | +| `commonLabels` | Labels to add to all deployed objects | `{}` | +| `commonAnnotations` | Annotations to add to all deployed objects | `{}` | +| `kubeVersion` | Force target Kubernetes version (using Helm capabilities if not set) | `""` | +| `clusterDomain` | Default Kubernetes cluster domain | `cluster.local` | +| `extraDeploy` | Array of extra objects to deploy with the release | `[]` | + + +### MinIO® parameters + +| Name | Description | Value | +| -------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------- | +| `image.registry` | MinIO® image registry | `docker.io` | +| `image.repository` | MinIO® image repository | `bitnami/minio` | +| `image.tag` | MinIO® image tag (immutable tags are recommended) | `2022.11.29-debian-11-r0` | +| `image.digest` | MinIO® image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` | +| `image.pullPolicy` | Image pull policy | `IfNotPresent` | +| `image.pullSecrets` | Specify docker-registry secret names as an array | `[]` | +| `image.debug` | Specify if debug logs should be enabled | `false` | +| `clientImage.registry` | MinIO® Client image registry | `docker.io` | +| `clientImage.repository` | MinIO® Client image repository | `bitnami/minio-client` | +| `clientImage.tag` | MinIO® Client image tag (immutable tags are recommended) | `2022.11.17-debian-11-r4` | +| `clientImage.digest` | MinIO® Client image digest in the way sha256:aa.... 
Please note this parameter, if set, will override the tag | `""` | +| `mode` | MinIO® server mode (`standalone` or `distributed`) | `standalone` | +| `auth.rootUser` | MinIO® root username | `admin` | +| `auth.rootPassword` | Password for MinIO® root user | `""` | +| `auth.existingSecret` | Use existing secret for credentials details (`auth.rootUser` and `auth.rootPassword` will be ignored and picked up from this secret). The secret has to contain the keys `root-user` and `root-password`) | `""` | +| `auth.forcePassword` | Force users to specify required passwords | `false` | +| `auth.useCredentialsFiles` | Mount credentials as a files instead of using an environment variable | `false` | +| `auth.forceNewKeys` | Force root credentials (user and password) to be reconfigured every time they change in the secrets | `false` | +| `defaultBuckets` | Comma, semi-colon or space separated list of buckets to create at initialization (only in standalone mode) | `""` | +| `disableWebUI` | Disable MinIO® Web UI | `false` | +| `tls.enabled` | Enable tls in front of the container | `false` | +| `tls.autoGenerated` | Generate automatically self-signed TLS certificates | `false` | +| `tls.existingSecret` | Name of an existing secret holding the certificate information | `""` | +| `tls.mountPath` | The mount path where the secret will be located | `""` | +| `extraEnvVars` | Extra environment variables to be set on MinIO® container | `[]` | +| `extraEnvVarsCM` | ConfigMap with extra environment variables | `""` | +| `extraEnvVarsSecret` | Secret with extra environment variables | `""` | +| `command` | Default container command (useful when using custom images). Use array form | `[]` | +| `args` | Default container args (useful when using custom images). Use array form | `[]` | + + +### MinIO® deployment/statefulset parameters + +| Name | Description | Value | +| ---------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------- | +| `schedulerName` | Specifies the schedulerName, if it's nil uses kube-scheduler | `""` | +| `terminationGracePeriodSeconds` | In seconds, time the given to the MinIO pod needs to terminate gracefully | `""` | +| `deployment.updateStrategy.type` | Deployment strategy type | `Recreate` | +| `statefulset.updateStrategy.type` | StatefulSet strategy type | `RollingUpdate` | +| `statefulset.podManagementPolicy` | StatefulSet controller supports relax its ordering guarantees while preserving its uniqueness and identity guarantees. There are two valid pod management policies: OrderedReady and Parallel | `Parallel` | +| `statefulset.replicaCount` | Number of pods per zone (only for MinIO® distributed mode). Should be even and `>= 4` | `4` | +| `statefulset.zones` | Number of zones (only for MinIO® distributed mode) | `1` | +| `statefulset.drivesPerNode` | Number of drives attached to every node (only for MinIO® distributed mode) | `1` | +| `provisioning.enabled` | Enable MinIO® provisioning Job | `false` | +| `provisioning.schedulerName` | Name of the k8s scheduler (other than default) for MinIO® provisioning | `""` | +| `provisioning.podAnnotations` | Provisioning Pod annotations. | `{}` | +| `provisioning.command` | Default provisioning container command (useful when using custom images). 
Use array form | `[]` | +| `provisioning.args` | Default provisioning container args (useful when using custom images). Use array form | `[]` | +| `provisioning.extraCommands` | Optionally specify extra list of additional commands for MinIO® provisioning pod | `[]` | +| `provisioning.extraVolumes` | Optionally specify extra list of additional volumes for MinIO® provisioning pod | `[]` | +| `provisioning.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for MinIO® provisioning container | `[]` | +| `provisioning.resources.limits` | The resources limits for the container | `{}` | +| `provisioning.resources.requests` | The requested resources for the container | `{}` | +| `provisioning.policies` | MinIO® policies provisioning | `[]` | +| `provisioning.users` | MinIO® users provisioning. Can be used in addition to provisioning.usersExistingSecrets. | `[]` | +| `provisioning.usersExistingSecrets` | Array if existing secrets containing MinIO® users to be provisioned. Can be used in addition to provisioning.users. | `[]` | +| `provisioning.groups` | MinIO® groups provisioning | `[]` | +| `provisioning.buckets` | MinIO® buckets, versioning, lifecycle, quota and tags provisioning | `[]` | +| `provisioning.config` | MinIO® config provisioning | `[]` | +| `provisioning.podSecurityContext.enabled` | Enable pod Security Context | `true` | +| `provisioning.podSecurityContext.fsGroup` | Group ID for the container | `1001` | +| `provisioning.containerSecurityContext.enabled` | Enable container Security Context | `true` | +| `provisioning.containerSecurityContext.runAsUser` | User ID for the container | `1001` | +| `provisioning.containerSecurityContext.runAsNonRoot` | Avoid running as root User | `true` | +| `hostAliases` | MinIO® pod host aliases | `[]` | +| `containerPorts.api` | MinIO® container port to open for MinIO® API | `9000` | +| `containerPorts.console` | MinIO® container port to open for MinIO® Console | `9001` | +| `podSecurityContext.enabled` | Enable pod Security Context | `true` | +| `podSecurityContext.fsGroup` | Group ID for the container | `1001` | +| `containerSecurityContext.enabled` | Enable container Security Context | `true` | +| `containerSecurityContext.runAsUser` | User ID for the container | `1001` | +| `containerSecurityContext.runAsNonRoot` | Avoid running as root User | `true` | +| `podLabels` | Extra labels for MinIO® pods | `{}` | +| `podAnnotations` | Annotations for MinIO® pods | `{}` | +| `podAffinityPreset` | Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `podAntiAffinityPreset` | Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `soft` | +| `nodeAffinityPreset.type` | Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `nodeAffinityPreset.key` | Node label key to match. Ignored if `affinity` is set. | `""` | +| `nodeAffinityPreset.values` | Node label values to match. Ignored if `affinity` is set. | `[]` | +| `affinity` | Affinity for pod assignment. Evaluated as a template. | `{}` | +| `nodeSelector` | Node labels for pod assignment. Evaluated as a template. | `{}` | +| `tolerations` | Tolerations for pod assignment. Evaluated as a template. 
| `[]` | +| `topologySpreadConstraints` | Topology Spread Constraints for MinIO® pods assignment spread across your cluster among failure-domains | `[]` | +| `priorityClassName` | MinIO® pods' priorityClassName | `""` | +| `resources.limits` | The resources limits for the MinIO® container | `{}` | +| `resources.requests` | The requested resources for the MinIO® container | `{}` | +| `livenessProbe.enabled` | Enable livenessProbe | `true` | +| `livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `5` | +| `livenessProbe.periodSeconds` | Period seconds for livenessProbe | `5` | +| `livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `5` | +| `livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `5` | +| `livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` | +| `readinessProbe.enabled` | Enable readinessProbe | `true` | +| `readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `5` | +| `readinessProbe.periodSeconds` | Period seconds for readinessProbe | `5` | +| `readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `1` | +| `readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `5` | +| `readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` | +| `startupProbe.enabled` | Enable startupProbe | `false` | +| `startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `0` | +| `startupProbe.periodSeconds` | Period seconds for startupProbe | `10` | +| `startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `5` | +| `startupProbe.failureThreshold` | Failure threshold for startupProbe | `60` | +| `startupProbe.successThreshold` | Success threshold for startupProbe | `1` | +| `customLivenessProbe` | Override default liveness probe | `{}` | +| `customReadinessProbe` | Override default readiness probe | `{}` | +| `customStartupProbe` | Override default startup probe | `{}` | +| `lifecycleHooks` | for the MinIO® container(s) to automate configuration before or after startup | `{}` | +| `extraVolumes` | Optionally specify extra list of additional volumes for MinIO® pods | `[]` | +| `extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for MinIO® container(s) | `[]` | +| `initContainers` | Add additional init containers to the MinIO® pods | `[]` | +| `sidecars` | Add additional sidecar containers to the MinIO® pods | `[]` | + + +### Traffic exposure parameters + +| Name | Description | Value | +| ---------------------------------- | -------------------------------------------------------------------------------------------------------------------------------- | ------------------------ | +| `service.type` | MinIO® service type | `ClusterIP` | +| `service.ports.api` | MinIO® API service port | `9000` | +| `service.ports.console` | MinIO® Console service port | `9001` | +| `service.nodePorts.api` | Specify the MinIO® API nodePort value for the LoadBalancer and NodePort service types | `""` | +| `service.nodePorts.console` | Specify the MinIO® Console nodePort value for the LoadBalancer and NodePort service types | `""` | +| `service.clusterIP` | Service Cluster IP | `""` | +| `service.loadBalancerIP` | loadBalancerIP if service type is `LoadBalancer` (optional, cloud specific) | `""` | +| `service.loadBalancerSourceRanges` | Addresses that are allowed when service is LoadBalancer | `[]` | +| `service.externalTrafficPolicy` | Enable client source IP preservation | `Cluster` | +| 
`service.extraPorts` | Extra ports to expose in the service (normally used with the `sidecar` value) | `[]` | +| `service.annotations` | Annotations for MinIO® service | `{}` | +| `ingress.enabled` | Enable ingress controller resource for MinIO Console | `false` | +| `ingress.apiVersion` | Force Ingress API version (automatically detected if not set) | `""` | +| `ingress.ingressClassName` | IngressClass that will be be used to implement the Ingress (Kubernetes 1.18+) | `""` | +| `ingress.hostname` | Default host for the ingress resource | `minio.local` | +| `ingress.path` | The Path to MinIO®. You may need to set this to '/*' in order to use this with ALB ingress controllers. | `/` | +| `ingress.pathType` | Ingress path type | `ImplementationSpecific` | +| `ingress.servicePort` | Service port to be used | `minio-console` | +| `ingress.annotations` | Additional annotations for the Ingress resource. To enable certificate autogeneration, place here your cert-manager annotations. | `{}` | +| `ingress.tls` | Enable TLS configuration for the hostname defined at `ingress.hostname` parameter | `false` | +| `ingress.selfSigned` | Create a TLS secret for this ingress record using self-signed certificates generated by Helm | `false` | +| `ingress.extraHosts` | The list of additional hostnames to be covered with this ingress record. | `[]` | +| `ingress.extraPaths` | Any additional paths that may need to be added to the ingress under the main host | `[]` | +| `ingress.extraTls` | The tls configuration for additional hostnames to be covered with this ingress record. | `[]` | +| `ingress.secrets` | If you're providing your own certificates, please use this to add the certificates as secrets | `[]` | +| `ingress.extraRules` | Additional rules to be covered with this ingress record | `[]` | +| `apiIngress.enabled` | Enable ingress controller resource for MinIO API | `false` | +| `apiIngress.apiVersion` | Force Ingress API version (automatically detected if not set) | `""` | +| `apiIngress.ingressClassName` | IngressClass that will be be used to implement the Ingress (Kubernetes 1.18+) | `""` | +| `apiIngress.hostname` | Default host for the ingress resource | `minio.local` | +| `apiIngress.path` | The Path to MinIO®. You may need to set this to '/*' in order to use this with ALB ingress controllers. | `/` | +| `apiIngress.pathType` | Ingress path type | `ImplementationSpecific` | +| `apiIngress.servicePort` | Service port to be used | `minio-api` | +| `apiIngress.annotations` | Additional annotations for the Ingress resource. To enable certificate autogeneration, place here your cert-manager annotations. | `{}` | +| `apiIngress.tls` | Enable TLS configuration for the hostname defined at `apiIngress.hostname` parameter | `false` | +| `apiIngress.selfSigned` | Create a TLS secret for this ingress record using self-signed certificates generated by Helm | `false` | +| `apiIngress.extraHosts` | The list of additional hostnames to be covered with this ingress record. | `[]` | +| `apiIngress.extraPaths` | Any additional paths that may need to be added to the ingress under the main host | `[]` | +| `apiIngress.extraTls` | The tls configuration for additional hostnames to be covered with this ingress record. 
| `[]` | +| `apiIngress.secrets` | If you're providing your own certificates, please use this to add the certificates as secrets | `[]` | +| `apiIngress.extraRules` | Additional rules to be covered with this ingress record | `[]` | +| `networkPolicy.enabled` | Enable the default NetworkPolicy policy | `false` | +| `networkPolicy.allowExternal` | Don't require client label for connections | `true` | +| `networkPolicy.extraFromClauses` | Allows to add extra 'from' clauses to the NetworkPolicy | `{}` | + + +### Persistence parameters + +| Name | Description | Value | +| --------------------------- | -------------------------------------------------------------------- | ------------------- | +| `persistence.enabled` | Enable MinIO® data persistence using PVC. If false, use emptyDir | `true` | +| `persistence.storageClass` | PVC Storage Class for MinIO® data volume | `""` | +| `persistence.mountPath` | Data volume mount path | `/data` | +| `persistence.accessModes` | PVC Access Modes for MinIO® data volume | `["ReadWriteOnce"]` | +| `persistence.size` | PVC Storage Request for MinIO® data volume | `8Gi` | +| `persistence.annotations` | Annotations for the PVC | `{}` | +| `persistence.existingClaim` | Name of an existing PVC to use (only in `standalone` mode) | `""` | + + +### Volume Permissions parameters + +| Name | Description | Value | +| ------------------------------------------------------ | --------------------------------------------------------------------------------------------------------------------------------- | ----------------------- | +| `volumePermissions.enabled` | Enable init container that changes the owner and group of the persistent volume(s) mountpoint to `runAsUser:fsGroup` | `false` | +| `volumePermissions.image.registry` | Init container volume-permissions image registry | `docker.io` | +| `volumePermissions.image.repository` | Init container volume-permissions image repository | `bitnami/bitnami-shell` | +| `volumePermissions.image.tag` | Init container volume-permissions image tag (immutable tags are recommended) | `11-debian-11-r57` | +| `volumePermissions.image.digest` | Init container volume-permissions image digest in the way sha256:aa.... 
Please note this parameter, if set, will override the tag | `""` | +| `volumePermissions.image.pullPolicy` | Init container volume-permissions image pull policy | `IfNotPresent` | +| `volumePermissions.image.pullSecrets` | Specify docker-registry secret names as an array | `[]` | +| `volumePermissions.resources.limits` | Init container volume-permissions resource limits | `{}` | +| `volumePermissions.resources.requests` | Init container volume-permissions resource requests | `{}` | +| `volumePermissions.containerSecurityContext.runAsUser` | User ID for the init container | `0` | + + +### RBAC parameters + +| Name | Description | Value | +| --------------------------------------------- | ----------------------------------------------------------- | ------ | +| `serviceAccount.create` | Enable the creation of a ServiceAccount for MinIO® pods | `true` | +| `serviceAccount.name` | Name of the created ServiceAccount | `""` | +| `serviceAccount.automountServiceAccountToken` | Enable/disable auto mounting of the service account token | `true` | +| `serviceAccount.annotations` | Custom annotations for MinIO® ServiceAccount | `{}` | + + +### Other parameters + +| Name | Description | Value | +| -------------------- | --------------------------------------------------------------------------------- | ------- | +| `pdb.create` | Enable/disable a Pod Disruption Budget creation | `false` | +| `pdb.minAvailable` | Minimum number/percentage of pods that must still be available after the eviction | `1` | +| `pdb.maxUnavailable` | Maximum number/percentage of pods that may be made unavailable after the eviction | `""` | + + +### Metrics parameters + +| Name | Description | Value | +| ------------------------------------------ | ----------------------------------------------------------------------------------------------------------------------------- | --------------------------- | +| `metrics.prometheusAuthType` | Authentication mode for Prometheus (`jwt` or `public`) | `public` | +| `metrics.serviceMonitor.enabled` | If the operator is installed in your cluster, set to true to create a Service Monitor Entry | `false` | +| `metrics.serviceMonitor.namespace` | Namespace which Prometheus is running in | `""` | +| `metrics.serviceMonitor.labels` | Extra labels for the ServiceMonitor | `{}` | +| `metrics.serviceMonitor.jobLabel` | The name of the label on the target service to use as the job name in Prometheus | `""` | +| `metrics.serviceMonitor.path` | HTTP path to scrape for metrics | `/minio/v2/metrics/cluster` | +| `metrics.serviceMonitor.interval` | Interval at which metrics should be scraped | `30s` | +| `metrics.serviceMonitor.scrapeTimeout` | Specify the timeout after which the scrape is ended | `""` | +| `metrics.serviceMonitor.metricRelabelings` | MetricRelabelConfigs to apply to samples before ingestion | `[]` | +| `metrics.serviceMonitor.relabelings` | Metrics relabelings to add to the scrape endpoint, applied before scraping | `[]` | +| `metrics.serviceMonitor.honorLabels` | Specify honorLabels parameter to add the scrape endpoint | `false` | +| `metrics.serviceMonitor.selector` | Prometheus instance selector labels | `{}` | +| `metrics.serviceMonitor.apiVersion` | ApiVersion for the serviceMonitor Resource (defaults to "monitoring.coreos.com/v1") | `""` | +| `metrics.prometheusRule.enabled` | Create a Prometheus Operator PrometheusRule (also requires `metrics.enabled` to be `true` and `metrics.prometheusRule.rules`) | `false` | +| `metrics.prometheusRule.namespace` | Namespace for the 
PrometheusRule Resource (defaults to the Release Namespace) | `""` | +| `metrics.prometheusRule.additionalLabels` | Additional labels that can be used so PrometheusRule will be discovered by Prometheus | `{}` | +| `metrics.prometheusRule.rules` | Prometheus Rule definitions | `[]` | + + +### Gateway parameters + +| Name | Description | Value | +| -------------------------------------------------------- | -------------------------------------------------------------------------------------------------- | -------------------------- | +| `gateway.enabled` | Use MinIO® as Gateway for other storage systems | `false` | +| `gateway.type` | Gateway type. Supported types are: `azure`, `gcs`, `nas`, `s3` | `s3` | +| `gateway.replicaCount` | Number of MinIO® Gateway replicas | `4` | +| `gateway.updateStrategy.type` | Update strategy type for MinIO® Gateway replicas | `Recreate` | +| `gateway.autoscaling.enabled` | Enable autoscaling for MinIO® Gateway deployment | `false` | +| `gateway.autoscaling.minReplicas` | Minimum number of replicas to scale back | `4` | +| `gateway.autoscaling.maxReplicas` | Maximum number of replicas to scale out | `4` | +| `gateway.autoscaling.targetCPU` | Target CPU utilization percentage | `""` | +| `gateway.autoscaling.targetMemory` | Target Memory utilization percentage | `""` | +| `gateway.priorityClassName` | Pod priority class name for MinIO® Gateway | `""` | +| `gateway.auth.azure.accessKey` | Access key to access MinIO® using Azure Gateway | `""` | +| `gateway.auth.azure.secretKey` | Secret key to access MinIO® using Azure Gateway | `""` | +| `gateway.auth.azure.serviceEndpoint` | Azure Blob Storage custom endpoint | `""` | +| `gateway.auth.azure.storageAccountName` | Azure Storage Account Name to use to access Azure Blob Storage | `""` | +| `gateway.auth.azure.storageAccountKey` | Azure Storage Account Key to use to access Azure Blob Storage | `""` | +| `gateway.auth.azure.storageAccountNameExistingSecret` | Existing Secret name to extract Azure Storage Account Name from to access Azure Blob Storage | `""` | +| `gateway.auth.azure.storageAccountNameExistingSecretKey` | Existing Secret key to extract Azure Storage Account Name from to use to access Azure Blob Storage | `""` | +| `gateway.auth.azure.storageAccountKeyExistingSecret` | Existing Secret name to extract Azure Storage Account Key from to access Azure Blob Storage | `""` | +| `gateway.auth.azure.storageAccountKeyExistingSecretKey` | Existing Secret key to extract Azure Storage Account Key from to use to access Azure Blob Storage | `""` | +| `gateway.auth.gcs.accessKey` | Access key to access MinIO® using GCS Gateway | `""` | +| `gateway.auth.gcs.secretKey` | Secret key to access MinIO® using GCS Gateway | `""` | +| `gateway.auth.gcs.keyJSON` | Service Account key to access GCS | `""` | +| `gateway.auth.gcs.projectID` | GCP Project ID to use | `""` | +| `gateway.auth.nas.accessKey` | Access key to access MinIO® using NAS Gateway | `""` | +| `gateway.auth.nas.secretKey` | Secret key to access MinIO® using NAS Gateway | `""` | +| `gateway.auth.s3.accessKey` | Access key to use to access AWS S3 | `""` | +| `gateway.auth.s3.secretKey` | Secret key to use to access AWS S3 | `""` | +| `gateway.auth.s3.serviceEndpoint` | AWS S3 endpoint | `https://s3.amazonaws.com` | + + +Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. 
For example, + +```console +$ helm install my-release \ + --set auth.rootUser=minio-admin \ + --set auth.rootPassword=minio-secret-password \ + my-repo/minio +``` + +The above command sets the MinIO® Server root user and password to `minio-admin` and `minio-secret-password`, respectively. + +Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example, + +```console +$ helm install my-release -f values.yaml my-repo/minio +``` + +> **Tip**: You can use the default [values.yaml](values.yaml) + +## Configuration and installation details + +### [Rolling VS Immutable tags](https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/) + +It is strongly recommended to use immutable tags in a production environment. This ensures your deployment does not change automatically if the same tag is updated with a different image. + +Bitnami will release a new chart updating its containers if a new version of the main container, significant changes, or critical vulnerabilities exist. + +### Distributed mode + +By default, this chart provisions a MinIO® server in standalone mode. You can start MinIO® server in [distributed mode](https://docs.minio.io/docs/distributed-minio-quickstart-guide) with the following parameter: `mode=distributed` + +This chart bootstrap MinIO® server in distributed mode with 4 nodes by default. You can change the number of nodes using the `statefulset.replicaCount` parameter. For instance, you can deploy the chart with 8 nodes using the following parameters: + +```console +mode=distributed +statefulset.replicaCount=8 +``` + +You can also bootstrap MinIO® server in distributed mode in several zones, and using multiple drives per node. For instance, you can deploy the chart with 2 nodes per zone on 2 zones, using 2 drives per node: + +```console +mode=distributed +statefulset.replicaCount=2 +statefulset.zones=2 +statefulset.drivesPerNode=2 +``` + +> Note: The total number of drives should be greater than 4 to guarantee erasure coding. Please set a combination of nodes, and drives per node that match this condition. + +### Prometheus exporter + +MinIO® exports Prometheus metrics at `/minio/v2/metrics/cluster`. To allow Prometheus collecting your MinIO® metrics, modify the `values.yaml` adding the corresponding annotations: + +```diff +- podAnnotations: {} ++ podAnnotations: ++ prometheus.io/scrape: "true" ++ prometheus.io/path: "/minio/v2/metrics/cluster" ++ prometheus.io/port: "9000" +``` + +> Find more information about MinIO® metrics at https://docs.min.io/docs/how-to-monitor-minio-using-prometheus.html + +## Persistence + +The [Bitnami Object Storage based on MinIO(®)](https://github.com/bitnami/containers/tree/main/bitnami/minio) image stores data at the `/data` path of the container. + +The chart mounts a [Persistent Volume](https://kubernetes.io/docs/concepts/storage/persistent-volumes/) at this location. The volume is created using dynamic volume provisioning. + +### Adjust permissions of persistent volume mountpoint + +As the image run as non-root by default, it is necessary to adjust the ownership of the persistent volume so that the container can write data into it. + +By default, the chart is configured to use Kubernetes Security Context to automatically change the ownership of the volume. However, this feature does not work in all Kubernetes distributions. 
+As an alternative, this chart supports using an initContainer to change the ownership of the volume before mounting it in the final destination. + +You can enable this initContainer by setting `volumePermissions.enabled` to `true`. + +### Ingress + +This chart provides support for Ingress resources. If you have an ingress controller installed on your cluster, such as [nginx-ingress-controller](https://github.com/bitnami/charts/tree/main/bitnami/nginx-ingress-controller) or [contour](https://github.com/bitnami/charts/tree/main/bitnami/contour) you can utilize the ingress controller to serve your application. + +To enable Ingress integration, set `ingress.enabled` to `true`. The `ingress.hostname` property can be used to set the host name. The `ingress.tls` parameter can be used to add the TLS configuration for this host. It is also possible to have more than one host, with a separate TLS configuration for each host. [Learn more about configuring and using Ingress](https://docs.bitnami.com/kubernetes/infrastructure/geode/configuration/configure-ingress/). + +### TLS secrets + +The chart also facilitates the creation of TLS secrets for use with the Ingress controller, with different options for certificate management. [Learn more about TLS secrets](https://docs.bitnami.com/kubernetes/infrastructure/geode/administration/enable-tls-ingress/). + +### MinIO® Gateway + +MinIO® can be configured as a Gateway for other other storage systems. Currently this chart supports to setup MinIO® as a Gateway for the storage systems below: + +- [Azure Blob Storage](https://azure.microsoft.com/en-us/services/storage/blobs/) +- [GCS](https://cloud.google.com/storage) +- NAS: Network Attached Storage +- [AWS S3](https://aws.amazon.com/s3/) + +The enable this feature, install the chart setting `gateway.enabled` to `true`. You can choose the Gateway type setting the `gateway.type` parameter. For instance, to install the chart as a S3 Gateway, install the chart the using the following parameters: + +```console +gateway.enabled=true +gateway.replicaCount=4 +gateway.type=s3 +gateway.auth.s3.serviceEndpoint=https://s3.amazonaws.com +gateway.auth.s3.accessKey=S3_ACCESS_KEY +gateway.auth.s3.secretKey=S3_SECRET_KEY +``` + +> Note: remember to replace the S3_ACCESS_KEY and S3_SECRET_KEY placeholders with your actual S3 access & secret keys. + +Find all the available parameters to configure MinIO® as a Gateway in the [Gateway parameters section](#gateway-parameters). + +> Note: when using MinIO® as a NAS Gateway, you need ReadWriteMany PVs to deploy multiple MinIO® instances. Ensure you K8s cluster supports this kind of cluster, and install the chart setting `persistence.accessModes[0]` to `ReadWriteMany` to do so. + +### Adding extra environment variables + +In case you want to add extra environment variables (useful for advanced operations like custom init scripts), you can use the `extraEnvVars` property. + +```yaml +extraEnvVars: + - name: MINIO_LOG_LEVEL + value: DEBUG +``` + +Alternatively, you can use a ConfigMap or a Secret with the environment variables. To do so, use the `extraEnvVarsCM` or the `extraEnvVarsSecret` values. + +### Sidecars and Init Containers + +If you have a need for additional containers to run within the same pod as the MinIO® app (e.g. an additional metrics or logging exporter), you can do so via the `sidecars` config parameter. Simply define your container according to the Kubernetes container spec. 
+ +```yaml +sidecars: + - name: your-image-name + image: your-image + imagePullPolicy: Always + ports: + - name: portname + containerPort: 1234 +``` + +Similarly, you can add extra init containers using the `initContainers` parameter. + +```yaml +initContainers: + - name: your-image-name + image: your-image + imagePullPolicy: Always + ports: + - name: portname + containerPort: 1234 +``` + +### Setting Pod's affinity + +This chart allows you to set your custom affinity using the `affinity` parameter. Find more information about Pod's affinity in the [kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity). + +As an alternative, you can use of the preset configurations for pod affinity, pod anti-affinity, and node affinity available at the [bitnami/common](https://github.com/bitnami/charts/tree/main/bitnami/common#affinities) chart. To do so, set the `podAffinityPreset`, `podAntiAffinityPreset`, or `nodeAffinityPreset` parameters. + +### Deploying extra resources + +There are cases where you may want to deploy extra objects, such a ConfigMap containing your app's configuration or some extra deployment with a micro service used by your app. For covering this case, the chart allows adding the full specification of other objects using the `extraDeploy` parameter. + +## Troubleshooting + +Find more information about how to deal with common errors related to Bitnami's Helm charts in [this troubleshooting guide](https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues). + +## Upgrading + +### To 11.0.0 + +This version deprecates the usage of `MINIO_ACCESS_KEY` and `MINIO_SECRET_KEY` environment variables in MINIO® container in favor of `MINIO_ROOT_USER` and `MINIO_ROOT_PASSWORD`. + +If you were already using the new variables, no issues are expected during upgrade. + +### To 9.0.0 + +This version updates MinIO® authentication parameters so they're aligned with the [current terminology](https://docs.min.io/minio/baremetal/security/minio-identity-management/user-management.html#minio-users-root). As a result the following parameters have been affected: + +- `accessKey.password` has been renamed to `auth.rootUser`. +- `secretKey.password` has been renamed to `auth.rootPassword`. +- `accessKey.forcePassword` and `secretKey.forcePassword` have been unified into `auth.forcePassword`. +- `existingSecret`, `useCredentialsFile` and `forceNewKeys` have been renamed to `auth.existingSecret`, `auth.useCredentialsFiles` and `forceNewKeys`, respectively. + +### To 8.0.0 + +This version updates MinIO® after some major changes, affecting its Web UI. MinIO® has replaced its MinIO® Browser with the MinIO® Console, and Web UI has been moved to a separated port. As a result the following variables have been affected: +- `service.port` has been slit into `service.ports.api` (default: 9000) and `service.ports.console` (default: 9001). +- `containerPort` has been slit into `containerPorts.api` (default: 9000) and `containerPort.console` (default: 9001). +- `service.nodePort`has been slit into `service.nodePorts.api` and `service.nodePorts.console`. +- Service port `minio` has been replaced with `minio-api` and `minio-console` with target ports minio-api and minio-console respectively. +- Liveness, readiness and startup probes now use port `minio-console` instead of `minio`. + +Please note that Web UI, previously running on port 9000 will now use port 9001 leaving port 9000 for the MinIO® Server API. 
+ +### To 7.0.0 + +This version introduces pod and container securityContext support. The previous configuration of `securityContext` has moved to `podSecurityContext` and `containerSecurityContext`. Apart from this case, no issues are expected to appear when upgrading. + +### To 5.0.0 + +This version standardizes the way of defining Ingress rules. When configuring a single hostname for the Ingress rule, set the `ingress.hostname` value. When defining more than one, set the `ingress.extraHosts` array. Apart from this case, no issues are expected to appear when upgrading. + +### To 4.1.0 + +This version introduces `bitnami/common`, a [library chart](https://helm.sh/docs/topics/library_charts/#helm) as a dependency. More documentation about this new utility could be found [here](https://github.com/bitnami/charts/tree/main/bitnami/common#bitnami-common-library-chart). Please, make sure that you have updated the chart dependencies before executing any upgrade. + +### To 4.0.0 + +[On November 13, 2020, Helm v2 support was formally finished](https://github.com/helm/charts#status-of-the-project), this major version is the result of the required changes applied to the Helm Chart to be able to incorporate the different features added in Helm v3 and to be consistent with the Helm project itself regarding the Helm v2 EOL. + +**What changes were introduced in this major version?** + +- Previous versions of this Helm Chart use `apiVersion: v1` (installable by both Helm 2 and 3), this Helm Chart was updated to `apiVersion: v2` (installable by Helm 3 only). [Here](https://helm.sh/docs/topics/charts/#the-apiversion-field) you can find more information about the `apiVersion` field. +- The different fields present in the *Chart.yaml* file has been ordered alphabetically in a homogeneous way for all the Bitnami Helm Charts + +**Considerations when upgrading to this version** + +- If you want to upgrade to this version from a previous one installed with Helm v3, you shouldn't face any issues +- If you want to upgrade to this version using Helm v2, this scenario is not supported as this version doesn't support Helm v2 anymore +- If you installed the previous version with Helm v2 and wants to upgrade to this version with Helm v3, please refer to the [official Helm documentation](https://helm.sh/docs/topics/v2_v3_migration/#migration-use-cases) about migrating from Helm v2 to v3 + +**Useful links** + +- https://docs.bitnami.com/tutorials/resolve-helm2-helm3-post-migration-issues/ +- https://helm.sh/docs/topics/v2_v3_migration/ +- https://helm.sh/blog/migrate-from-helm-v2-to-helm-v3/ + +## License + +Copyright © 2022 Bitnami + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
\ No newline at end of file diff --git a/charts/modules/minio/charts/common/Chart.yaml b/charts/modules/minio/charts/common/Chart.yaml new file mode 100644 index 0000000..653c063 --- /dev/null +++ b/charts/modules/minio/charts/common/Chart.yaml @@ -0,0 +1,23 @@ +annotations: + category: Infrastructure +apiVersion: v2 +appVersion: 2.2.1 +description: A Library Helm Chart for grouping common logic between bitnami charts. + This chart is not deployable by itself. +home: https://github.com/bitnami/charts/tree/main/bitnami/common +icon: https://bitnami.com/downloads/logos/bitnami-mark.png +keywords: +- common +- helper +- template +- function +- bitnami +maintainers: +- name: Bitnami + url: https://github.com/bitnami/charts +name: common +sources: +- https://github.com/bitnami/charts +- https://www.bitnami.com/ +type: library +version: 2.2.1 diff --git a/charts/modules/minio/charts/common/README.md b/charts/modules/minio/charts/common/README.md new file mode 100644 index 0000000..ec43a5f --- /dev/null +++ b/charts/modules/minio/charts/common/README.md @@ -0,0 +1,351 @@ +# Bitnami Common Library Chart + +A [Helm Library Chart](https://helm.sh/docs/topics/library_charts/#helm) for grouping common logic between bitnami charts. + +## TL;DR + +```yaml +dependencies: + - name: common + version: 1.x.x + repository: https://charts.bitnami.com/bitnami +``` + +```bash +$ helm dependency update +``` + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "common.names.fullname" . }} +data: + myvalue: "Hello World" +``` + +## Introduction + +This chart provides a common template helpers which can be used to develop new charts using [Helm](https://helm.sh) package manager. + +Bitnami charts can be used with [Kubeapps](https://kubeapps.dev/) for deployment and management of Helm Charts in clusters. + +## Prerequisites + +- Kubernetes 1.19+ +- Helm 3.2.0+ + +## Parameters + +The following table lists the helpers available in the library which are scoped in different sections. + +### Affinities + +| Helper identifier | Description | Expected Input | +|-------------------------------|------------------------------------------------------|------------------------------------------------| +| `common.affinities.nodes.soft` | Return a soft nodeAffinity definition | `dict "key" "FOO" "values" (list "BAR" "BAZ")` | +| `common.affinities.nodes.hard` | Return a hard nodeAffinity definition | `dict "key" "FOO" "values" (list "BAR" "BAZ")` | +| `common.affinities.pods.soft` | Return a soft podAffinity/podAntiAffinity definition | `dict "component" "FOO" "context" $` | +| `common.affinities.pods.hard` | Return a hard podAffinity/podAntiAffinity definition | `dict "component" "FOO" "context" $` | +| `common.affinities.topologyKey` | Return a topologyKey definition | `dict "topologyKey" "FOO"` | + +### Capabilities + +| Helper identifier | Description | Expected Input | +|------------------------------------------------|------------------------------------------------------------------------------------------------|-------------------| +| `common.capabilities.kubeVersion` | Return the target Kubernetes version (using client default if .Values.kubeVersion is not set). | `.` Chart context | +| `common.capabilities.cronjob.apiVersion` | Return the appropriate apiVersion for cronjob. | `.` Chart context | +| `common.capabilities.deployment.apiVersion` | Return the appropriate apiVersion for deployment. 
| `.` Chart context | +| `common.capabilities.statefulset.apiVersion` | Return the appropriate apiVersion for statefulset. | `.` Chart context | +| `common.capabilities.ingress.apiVersion` | Return the appropriate apiVersion for ingress. | `.` Chart context | +| `common.capabilities.rbac.apiVersion` | Return the appropriate apiVersion for RBAC resources. | `.` Chart context | +| `common.capabilities.crd.apiVersion` | Return the appropriate apiVersion for CRDs. | `.` Chart context | +| `common.capabilities.policy.apiVersion` | Return the appropriate apiVersion for podsecuritypolicy. | `.` Chart context | +| `common.capabilities.networkPolicy.apiVersion` | Return the appropriate apiVersion for networkpolicy. | `.` Chart context | +| `common.capabilities.apiService.apiVersion` | Return the appropriate apiVersion for APIService. | `.` Chart context | +| `common.capabilities.hpa.apiVersion` | Return the appropriate apiVersion for Horizontal Pod Autoscaler | `.` Chart context | +| `common.capabilities.supportsHelmVersion` | Returns true if the used Helm version is 3.3+ | `.` Chart context | + +### Errors + +| Helper identifier | Description | Expected Input | +|-----------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------| +| `common.errors.upgrade.passwords.empty` | It will ensure required passwords are given when we are upgrading a chart. If `validationErrors` is not empty it will throw an error and will stop the upgrade action. | `dict "validationErrors" (list $validationError00 $validationError01) "context" $` | + +### Images + +| Helper identifier | Description | Expected Input | +|-----------------------------|------------------------------------------------------|---------------------------------------------------------------------------------------------------------| +| `common.images.image` | Return the proper and full image name | `dict "imageRoot" .Values.path.to.the.image "global" $`, see [ImageRoot](#imageroot) for the structure. 
| +| `common.images.pullSecrets` | Return the proper Docker Image Registry Secret Names (deprecated: use common.images.renderPullSecrets instead) | `dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "global" .Values.global` | +| `common.images.renderPullSecrets` | Return the proper Docker Image Registry Secret Names (evaluates values as templates) | `dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "context" $` | + +### Ingress + +| Helper identifier | Description | Expected Input | +|-------------------------------------------|-------------------------------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.ingress.backend` | Generate a proper Ingress backend entry depending on the API version | `dict "serviceName" "foo" "servicePort" "bar"`, see the [Ingress deprecation notice](https://kubernetes.io/blog/2019/07/18/api-deprecations-in-1-16/) for the syntax differences | +| `common.ingress.supportsPathType` | Prints "true" if the pathType field is supported | `.` Chart context | +| `common.ingress.supportsIngressClassname` | Prints "true" if the ingressClassname field is supported | `.` Chart context | +| `common.ingress.certManagerRequest` | Prints "true" if required cert-manager annotations for TLS signed certificates are set in the Ingress annotations | `dict "annotations" .Values.path.to.the.ingress.annotations` | + +### Labels + +| Helper identifier | Description | Expected Input | +|-----------------------------|-----------------------------------------------------------------------------|-------------------| +| `common.labels.standard` | Return Kubernetes standard labels | `.` Chart context | +| `common.labels.matchLabels` | Labels to use on `deploy.spec.selector.matchLabels` and `svc.spec.selector` | `.` Chart context | + +### Names + +| Helper identifier | Description | Expected Input | +|-----------------------------------|-----------------------------------------------------------------------|-------------------| +| `common.names.name` | Expand the name of the chart or use `.Values.nameOverride` | `.` Chart context | +| `common.names.fullname` | Create a default fully qualified app name. | `.` Chart context | +| `common.names.namespace` | Allow the release namespace to be overridden | `.` Chart context | +| `common.names.fullname.namespace` | Create a fully qualified app name adding the installation's namespace | `.` Chart context | +| `common.names.chart` | Chart name plus version | `.` Chart context | + +### Secrets + +| Helper identifier | Description | Expected Input | +|-----------------------------------|--------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.secrets.name` | Generate the name of the secret. | `dict "existingSecret" .Values.path.to.the.existingSecret "defaultNameSuffix" "mySuffix" "context" $` see [ExistingSecret](#existingsecret) for the structure. | +| `common.secrets.key` | Generate secret key. | `dict "existingSecret" .Values.path.to.the.existingSecret "key" "keyName"` see [ExistingSecret](#existingsecret) for the structure. 
| +| `common.secrets.passwords.manage` | Generate secret password or retrieve one if already created. | `dict "secret" "secret-name" "key" "keyName" "providedValues" (list "path.to.password1" "path.to.password2") "length" 10 "strong" false "chartName" "chartName" "context" $`, length, strong and chartNAme fields are optional. | +| `common.secrets.exists` | Returns whether a previous generated secret already exists. | `dict "secret" "secret-name" "context" $` | + +### Storage + +| Helper identifier | Description | Expected Input | +|-------------------------------|---------------------------------------|---------------------------------------------------------------------------------------------------------------------| +| `common.storage.class` | Return the proper Storage Class | `dict "persistence" .Values.path.to.the.persistence "global" $`, see [Persistence](#persistence) for the structure. | + +### TplValues + +| Helper identifier | Description | Expected Input | +|---------------------------|----------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.tplvalues.render` | Renders a value that contains template | `dict "value" .Values.path.to.the.Value "context" $`, value is the value should rendered as template, context frequently is the chart context `$` or `.` | + +### Utils + +| Helper identifier | Description | Expected Input | +|--------------------------------|------------------------------------------------------------------------------------------|------------------------------------------------------------------------| +| `common.utils.fieldToEnvVar` | Build environment variable name given a field. | `dict "field" "my-password"` | +| `common.utils.secret.getvalue` | Print instructions to get a secret value. | `dict "secret" "secret-name" "field" "secret-value-field" "context" $` | +| `common.utils.getValueFromKey` | Gets a value from `.Values` object given its key path | `dict "key" "path.to.key" "context" $` | +| `common.utils.getKeyFromList` | Returns first `.Values` key with a defined value or first of the list if all non-defined | `dict "keys" (list "path.to.key1" "path.to.key2") "context" $` | + +### Validations + +| Helper identifier | Description | Expected Input | +|--------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.validations.values.single.empty` | Validate a value must not be empty. | `dict "valueKey" "path.to.value" "secret" "secret.name" "field" "my-password" "subchart" "subchart" "context" $` secret, field and subchart are optional. In case they are given, the helper will generate a how to get instruction. See [ValidateValue](#validatevalue) | +| `common.validations.values.multiple.empty` | Validate a multiple values must not be empty. It returns a shared error for all the values. | `dict "required" (list $validateValueConf00 $validateValueConf01) "context" $`. See [ValidateValue](#validatevalue) | +| `common.validations.values.mariadb.passwords` | This helper will ensure required password for MariaDB are not empty. 
It returns a shared error for all the values. | `dict "secret" "mariadb-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use mariadb chart and the helper. | +| `common.validations.values.mysql.passwords` | This helper will ensure required password for MySQL are not empty. It returns a shared error for all the values. | `dict "secret" "mysql-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use mysql chart and the helper. | +| `common.validations.values.postgresql.passwords` | This helper will ensure required password for PostgreSQL are not empty. It returns a shared error for all the values. | `dict "secret" "postgresql-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use postgresql chart and the helper. | +| `common.validations.values.redis.passwords` | This helper will ensure required password for Redis® are not empty. It returns a shared error for all the values. | `dict "secret" "redis-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use redis chart and the helper. | +| `common.validations.values.cassandra.passwords` | This helper will ensure required password for Cassandra are not empty. It returns a shared error for all the values. | `dict "secret" "cassandra-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use cassandra chart and the helper. | +| `common.validations.values.mongodb.passwords` | This helper will ensure required password for MongoDB® are not empty. It returns a shared error for all the values. | `dict "secret" "mongodb-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use mongodb chart and the helper. | + +### Warnings + +| Helper identifier | Description | Expected Input | +|------------------------------|----------------------------------|------------------------------------------------------------| +| `common.warnings.rollingTag` | Warning about using rolling tag. | `ImageRoot` see [ImageRoot](#imageroot) for the structure. | + +## Special input schemas + +### ImageRoot + +```yaml +registry: + type: string + description: Docker registry where the image is located + example: docker.io + +repository: + type: string + description: Repository and image name + example: bitnami/nginx + +tag: + type: string + description: image tag + example: 1.16.1-debian-10-r63 + +pullPolicy: + type: string + description: Specify a imagePullPolicy. Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + +pullSecrets: + type: array + items: + type: string + description: Optionally specify an array of imagePullSecrets (evaluated as templates). + +debug: + type: boolean + description: Set to true if you would like to see extra information on logs + example: false + +## An instance would be: +# registry: docker.io +# repository: bitnami/nginx +# tag: 1.16.1-debian-10-r63 +# pullPolicy: IfNotPresent +# debug: false +``` + +### Persistence + +```yaml +enabled: + type: boolean + description: Whether enable persistence. + example: true + +storageClass: + type: string + description: Ghost data Persistent Volume Storage Class, If set to "-", storageClassName: "" which disables dynamic provisioning. 
+ example: "-" + +accessMode: + type: string + description: Access mode for the Persistent Volume Storage. + example: ReadWriteOnce + +size: + type: string + description: Size the Persistent Volume Storage. + example: 8Gi + +path: + type: string + description: Path to be persisted. + example: /bitnami + +## An instance would be: +# enabled: true +# storageClass: "-" +# accessMode: ReadWriteOnce +# size: 8Gi +# path: /bitnami +``` + +### ExistingSecret + +```yaml +name: + type: string + description: Name of the existing secret. + example: mySecret +keyMapping: + description: Mapping between the expected key name and the name of the key in the existing secret. + type: object + +## An instance would be: +# name: mySecret +# keyMapping: +# password: myPasswordKey +``` + +#### Example of use + +When we store sensitive data for a deployment in a secret, some times we want to give to users the possibility of using theirs existing secrets. + +```yaml +# templates/secret.yaml +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "common.names.fullname" . }} + labels: + app: {{ include "common.names.fullname" . }} +type: Opaque +data: + password: {{ .Values.password | b64enc | quote }} + +# templates/dpl.yaml +--- +... + env: + - name: PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "common.secrets.name" (dict "existingSecret" .Values.existingSecret "context" $) }} + key: {{ include "common.secrets.key" (dict "existingSecret" .Values.existingSecret "key" "password") }} +... + +# values.yaml +--- +name: mySecret +keyMapping: + password: myPasswordKey +``` + +### ValidateValue + +#### NOTES.txt + +```console +{{- $validateValueConf00 := (dict "valueKey" "path.to.value00" "secret" "secretName" "field" "password-00") -}} +{{- $validateValueConf01 := (dict "valueKey" "path.to.value01" "secret" "secretName" "field" "password-01") -}} + +{{ include "common.validations.values.multiple.empty" (dict "required" (list $validateValueConf00 $validateValueConf01) "context" $) }} +``` + +If we force those values to be empty we will see some alerts + +```console +$ helm install test mychart --set path.to.value00="",path.to.value01="" + 'path.to.value00' must not be empty, please add '--set path.to.value00=$PASSWORD_00' to the command. To get the current value: + + export PASSWORD_00=$(kubectl get secret --namespace default secretName -o jsonpath="{.data.password-00}" | base64 -d) + + 'path.to.value01' must not be empty, please add '--set path.to.value01=$PASSWORD_01' to the command. To get the current value: + + export PASSWORD_01=$(kubectl get secret --namespace default secretName -o jsonpath="{.data.password-01}" | base64 -d) +``` + +## Upgrading + +### To 1.0.0 + +[On November 13, 2020, Helm v2 support was formally finished](https://github.com/helm/charts#status-of-the-project), this major version is the result of the required changes applied to the Helm Chart to be able to incorporate the different features added in Helm v3 and to be consistent with the Helm project itself regarding the Helm v2 EOL. + +**What changes were introduced in this major version?** + +- Previous versions of this Helm Chart use `apiVersion: v1` (installable by both Helm 2 and 3), this Helm Chart was updated to `apiVersion: v2` (installable by Helm 3 only). [Here](https://helm.sh/docs/topics/charts/#the-apiversion-field) you can find more information about the `apiVersion` field. +- Use `type: library`. [Here](https://v3.helm.sh/docs/faq/#library-chart-support) you can find more information. 
+- The different fields present in the *Chart.yaml* file has been ordered alphabetically in a homogeneous way for all the Bitnami Helm Charts + +**Considerations when upgrading to this version** + +- If you want to upgrade to this version from a previous one installed with Helm v3, you shouldn't face any issues +- If you want to upgrade to this version using Helm v2, this scenario is not supported as this version doesn't support Helm v2 anymore +- If you installed the previous version with Helm v2 and wants to upgrade to this version with Helm v3, please refer to the [official Helm documentation](https://helm.sh/docs/topics/v2_v3_migration/#migration-use-cases) about migrating from Helm v2 to v3 + +**Useful links** + +- https://docs.bitnami.com/tutorials/resolve-helm2-helm3-post-migration-issues/ +- https://helm.sh/docs/topics/v2_v3_migration/ +- https://helm.sh/blog/migrate-from-helm-v2-to-helm-v3/ + +## License + +Copyright © 2022 Bitnami + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/charts/modules/minio/charts/common/templates/_affinities.tpl b/charts/modules/minio/charts/common/templates/_affinities.tpl new file mode 100644 index 0000000..81902a6 --- /dev/null +++ b/charts/modules/minio/charts/common/templates/_affinities.tpl @@ -0,0 +1,106 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Return a soft nodeAffinity definition +{{ include "common.affinities.nodes.soft" (dict "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.nodes.soft" -}} +preferredDuringSchedulingIgnoredDuringExecution: + - preference: + matchExpressions: + - key: {{ .key }} + operator: In + values: + {{- range .values }} + - {{ . | quote }} + {{- end }} + weight: 1 +{{- end -}} + +{{/* +Return a hard nodeAffinity definition +{{ include "common.affinities.nodes.hard" (dict "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.nodes.hard" -}} +requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: {{ .key }} + operator: In + values: + {{- range .values }} + - {{ . | quote }} + {{- end }} +{{- end -}} + +{{/* +Return a nodeAffinity definition +{{ include "common.affinities.nodes" (dict "type" "soft" "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.nodes" -}} + {{- if eq .type "soft" }} + {{- include "common.affinities.nodes.soft" . -}} + {{- else if eq .type "hard" }} + {{- include "common.affinities.nodes.hard" . 
-}} + {{- end -}} +{{- end -}} + +{{/* +Return a topologyKey definition +{{ include "common.affinities.topologyKey" (dict "topologyKey" "BAR") -}} +*/}} +{{- define "common.affinities.topologyKey" -}} +{{ .topologyKey | default "kubernetes.io/hostname" -}} +{{- end -}} + +{{/* +Return a soft podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods.soft" (dict "component" "FOO" "extraMatchLabels" .Values.extraMatchLabels "topologyKey" "BAR" "context" $) -}} +*/}} +{{- define "common.affinities.pods.soft" -}} +{{- $component := default "" .component -}} +{{- $extraMatchLabels := default (dict) .extraMatchLabels -}} +preferredDuringSchedulingIgnoredDuringExecution: + - podAffinityTerm: + labelSelector: + matchLabels: {{- (include "common.labels.matchLabels" .context) | nindent 10 }} + {{- if not (empty $component) }} + {{ printf "app.kubernetes.io/component: %s" $component }} + {{- end }} + {{- range $key, $value := $extraMatchLabels }} + {{ $key }}: {{ $value | quote }} + {{- end }} + topologyKey: {{ include "common.affinities.topologyKey" (dict "topologyKey" .topologyKey) }} + weight: 1 +{{- end -}} + +{{/* +Return a hard podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods.hard" (dict "component" "FOO" "extraMatchLabels" .Values.extraMatchLabels "topologyKey" "BAR" "context" $) -}} +*/}} +{{- define "common.affinities.pods.hard" -}} +{{- $component := default "" .component -}} +{{- $extraMatchLabels := default (dict) .extraMatchLabels -}} +requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: {{- (include "common.labels.matchLabels" .context) | nindent 8 }} + {{- if not (empty $component) }} + {{ printf "app.kubernetes.io/component: %s" $component }} + {{- end }} + {{- range $key, $value := $extraMatchLabels }} + {{ $key }}: {{ $value | quote }} + {{- end }} + topologyKey: {{ include "common.affinities.topologyKey" (dict "topologyKey" .topologyKey) }} +{{- end -}} + +{{/* +Return a podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods" (dict "type" "soft" "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.pods" -}} + {{- if eq .type "soft" }} + {{- include "common.affinities.pods.soft" . -}} + {{- else if eq .type "hard" }} + {{- include "common.affinities.pods.hard" . -}} + {{- end -}} +{{- end -}} diff --git a/charts/modules/minio/charts/common/templates/_capabilities.tpl b/charts/modules/minio/charts/common/templates/_capabilities.tpl new file mode 100644 index 0000000..9d9b760 --- /dev/null +++ b/charts/modules/minio/charts/common/templates/_capabilities.tpl @@ -0,0 +1,154 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Return the target Kubernetes version +*/}} +{{- define "common.capabilities.kubeVersion" -}} +{{- if .Values.global }} + {{- if .Values.global.kubeVersion }} + {{- .Values.global.kubeVersion -}} + {{- else }} + {{- default .Capabilities.KubeVersion.Version .Values.kubeVersion -}} + {{- end -}} +{{- else }} +{{- default .Capabilities.KubeVersion.Version .Values.kubeVersion -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for poddisruptionbudget. +*/}} +{{- define "common.capabilities.policy.apiVersion" -}} +{{- if semverCompare "<1.21-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "policy/v1beta1" -}} +{{- else -}} +{{- print "policy/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for networkpolicy. 
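+
+Illustrative usage in a consuming template (a sketch, not part of the original file):
+  apiVersion: {{ include "common.capabilities.networkPolicy.apiVersion" . }}
+  kind: NetworkPolicy
+It resolves to "extensions/v1beta1" on Kubernetes older than 1.7 and to "networking.k8s.io/v1" otherwise.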
+*/}} +{{- define "common.capabilities.networkPolicy.apiVersion" -}} +{{- if semverCompare "<1.7-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for cronjob. +*/}} +{{- define "common.capabilities.cronjob.apiVersion" -}} +{{- if semverCompare "<1.21-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "batch/v1beta1" -}} +{{- else -}} +{{- print "batch/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for deployment. +*/}} +{{- define "common.capabilities.deployment.apiVersion" -}} +{{- if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for statefulset. +*/}} +{{- define "common.capabilities.statefulset.apiVersion" -}} +{{- if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "apps/v1beta1" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for ingress. +*/}} +{{- define "common.capabilities.ingress.apiVersion" -}} +{{- if .Values.ingress -}} +{{- if .Values.ingress.apiVersion -}} +{{- .Values.ingress.apiVersion -}} +{{- else if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "networking.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end }} +{{- else if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "networking.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for RBAC resources. +*/}} +{{- define "common.capabilities.rbac.apiVersion" -}} +{{- if semverCompare "<1.17-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "rbac.authorization.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "rbac.authorization.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for CRDs. +*/}} +{{- define "common.capabilities.crd.apiVersion" -}} +{{- if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "apiextensions.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "apiextensions.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for APIService. +*/}} +{{- define "common.capabilities.apiService.apiVersion" -}} +{{- if semverCompare "<1.10-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "apiregistration.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "apiregistration.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for Horizontal Pod Autoscaler. +*/}} +{{- define "common.capabilities.hpa.apiVersion" -}} +{{- if semverCompare "<1.23-0" (include "common.capabilities.kubeVersion" .context) -}} +{{- if .beta2 -}} +{{- print "autoscaling/v2beta2" -}} +{{- else -}} +{{- print "autoscaling/v2beta1" -}} +{{- end -}} +{{- else -}} +{{- print "autoscaling/v2" -}} +{{- end -}} +{{- end -}} + +{{/* +Returns true if the used Helm version is 3.3+. 
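+
+Illustrative usage (a sketch, not part of the original file; the label key is a placeholder):
+{{- if include "common.capabilities.supportsHelmVersion" . }}
+helm-version: {{ .Capabilities.HelmVersion.Version }}
+{{- end }}
+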
+A way to check the used Helm version was not introduced until version 3.3.0 with .Capabilities.HelmVersion, which contains an additional "{}}"  structure.
+This check is introduced as a regexMatch instead of {{ if .Capabilities.HelmVersion }} because checking for the key HelmVersion in <3.3 results in an "interface not found" error.
+**To be removed when the catalog's minimum Helm version is 3.3**
+*/}}
+{{- define "common.capabilities.supportsHelmVersion" -}}
+{{- if regexMatch "{(v[0-9])*[^}]*}}$" (.Capabilities | toString ) }}
+  {{- true -}}
+{{- end -}}
+{{- end -}}
diff --git a/charts/modules/minio/charts/common/templates/_errors.tpl b/charts/modules/minio/charts/common/templates/_errors.tpl
new file mode 100644
index 0000000..a79cc2e
--- /dev/null
+++ b/charts/modules/minio/charts/common/templates/_errors.tpl
@@ -0,0 +1,23 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Throw an error when upgrading using empty password values that must not be empty.
+
+Usage:
+{{- $validationError00 := include "common.validations.values.single.empty" (dict "valueKey" "path.to.password00" "secret" "secretName" "field" "password-00") -}}
+{{- $validationError01 := include "common.validations.values.single.empty" (dict "valueKey" "path.to.password01" "secret" "secretName" "field" "password-01") -}}
+{{ include "common.errors.upgrade.passwords.empty" (dict "validationErrors" (list $validationError00 $validationError01) "context" $) }}
+
+Required password params:
+  - validationErrors - String - Required. List of validation strings to be returned; if it is empty, it won't throw an error.
+  - context - Context - Required. Parent context.
+*/}}
+{{- define "common.errors.upgrade.passwords.empty" -}}
+  {{- $validationErrors := join "" .validationErrors -}}
+  {{- if and $validationErrors .context.Release.IsUpgrade -}}
+    {{- $errorString := "\nPASSWORDS ERROR: You must provide your current passwords when upgrading the release." -}}
+    {{- $errorString = print $errorString "\n    Note that even after reinstallation, old credentials may be needed as they may be kept in persistent volume claims."
-}} + {{- $errorString = print $errorString "\n Further information can be obtained at https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues/#credential-errors-while-upgrading-chart-releases" -}} + {{- $errorString = print $errorString "\n%s" -}} + {{- printf $errorString $validationErrors | fail -}} + {{- end -}} +{{- end -}} diff --git a/charts/modules/minio/charts/common/templates/_images.tpl b/charts/modules/minio/charts/common/templates/_images.tpl new file mode 100644 index 0000000..46c659e --- /dev/null +++ b/charts/modules/minio/charts/common/templates/_images.tpl @@ -0,0 +1,76 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Return the proper image name +{{ include "common.images.image" ( dict "imageRoot" .Values.path.to.the.image "global" $) }} +*/}} +{{- define "common.images.image" -}} +{{- $registryName := .imageRoot.registry -}} +{{- $repositoryName := .imageRoot.repository -}} +{{- $separator := ":" -}} +{{- $termination := .imageRoot.tag | toString -}} +{{- if .global }} + {{- if .global.imageRegistry }} + {{- $registryName = .global.imageRegistry -}} + {{- end -}} +{{- end -}} +{{- if .imageRoot.digest }} + {{- $separator = "@" -}} + {{- $termination = .imageRoot.digest | toString -}} +{{- end -}} +{{- printf "%s/%s%s%s" $registryName $repositoryName $separator $termination -}} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names (deprecated: use common.images.renderPullSecrets instead) +{{ include "common.images.pullSecrets" ( dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "global" .Values.global) }} +*/}} +{{- define "common.images.pullSecrets" -}} + {{- $pullSecrets := list }} + + {{- if .global }} + {{- range .global.imagePullSecrets -}} + {{- $pullSecrets = append $pullSecrets . -}} + {{- end -}} + {{- end -}} + + {{- range .images -}} + {{- range .pullSecrets -}} + {{- $pullSecrets = append $pullSecrets . -}} + {{- end -}} + {{- end -}} + + {{- if (not (empty $pullSecrets)) }} +imagePullSecrets: + {{- range $pullSecrets }} + - name: {{ . }} + {{- end }} + {{- end }} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names evaluating values as templates +{{ include "common.images.renderPullSecrets" ( dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "context" $) }} +*/}} +{{- define "common.images.renderPullSecrets" -}} + {{- $pullSecrets := list }} + {{- $context := .context }} + + {{- if $context.Values.global }} + {{- range $context.Values.global.imagePullSecrets -}} + {{- $pullSecrets = append $pullSecrets (include "common.tplvalues.render" (dict "value" . "context" $context)) -}} + {{- end -}} + {{- end -}} + + {{- range .images -}} + {{- range .pullSecrets -}} + {{- $pullSecrets = append $pullSecrets (include "common.tplvalues.render" (dict "value" . "context" $context)) -}} + {{- end -}} + {{- end -}} + + {{- if (not (empty $pullSecrets)) }} +imagePullSecrets: + {{- range $pullSecrets }} + - name: {{ . }} + {{- end }} + {{- end }} +{{- end -}} diff --git a/charts/modules/minio/charts/common/templates/_ingress.tpl b/charts/modules/minio/charts/common/templates/_ingress.tpl new file mode 100644 index 0000000..831da9c --- /dev/null +++ b/charts/modules/minio/charts/common/templates/_ingress.tpl @@ -0,0 +1,68 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Generate backend entry that is compatible with all Kubernetes API versions. 
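+
+Illustrative rendered output (a sketch with placeholder values, not part of the original file):
+  # networking.k8s.io/v1:
+  service:
+    name: my-backend
+    port:
+      number: 8080
+  # extensions/v1beta1 or networking.k8s.io/v1beta1:
+  serviceName: my-backend
+  servicePort: 8080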
+ +Usage: +{{ include "common.ingress.backend" (dict "serviceName" "backendName" "servicePort" "backendPort" "context" $) }} + +Params: + - serviceName - String. Name of an existing service backend + - servicePort - String/Int. Port name (or number) of the service. It will be translated to different yaml depending if it is a string or an integer. + - context - Dict - Required. The context for the template evaluation. +*/}} +{{- define "common.ingress.backend" -}} +{{- $apiVersion := (include "common.capabilities.ingress.apiVersion" .context) -}} +{{- if or (eq $apiVersion "extensions/v1beta1") (eq $apiVersion "networking.k8s.io/v1beta1") -}} +serviceName: {{ .serviceName }} +servicePort: {{ .servicePort }} +{{- else -}} +service: + name: {{ .serviceName }} + port: + {{- if typeIs "string" .servicePort }} + name: {{ .servicePort }} + {{- else if or (typeIs "int" .servicePort) (typeIs "float64" .servicePort) }} + number: {{ .servicePort | int }} + {{- end }} +{{- end -}} +{{- end -}} + +{{/* +Print "true" if the API pathType field is supported +Usage: +{{ include "common.ingress.supportsPathType" . }} +*/}} +{{- define "common.ingress.supportsPathType" -}} +{{- if (semverCompare "<1.18-0" (include "common.capabilities.kubeVersion" .)) -}} +{{- print "false" -}} +{{- else -}} +{{- print "true" -}} +{{- end -}} +{{- end -}} + +{{/* +Returns true if the ingressClassname field is supported +Usage: +{{ include "common.ingress.supportsIngressClassname" . }} +*/}} +{{- define "common.ingress.supportsIngressClassname" -}} +{{- if semverCompare "<1.18-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "false" -}} +{{- else -}} +{{- print "true" -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if cert-manager required annotations for TLS signed +certificates are set in the Ingress annotations +Ref: https://cert-manager.io/docs/usage/ingress/#supported-annotations +Usage: +{{ include "common.ingress.certManagerRequest" ( dict "annotations" .Values.path.to.the.ingress.annotations ) }} +*/}} +{{- define "common.ingress.certManagerRequest" -}} +{{ if or (hasKey .annotations "cert-manager.io/cluster-issuer") (hasKey .annotations "cert-manager.io/issuer") (hasKey .annotations "kubernetes.io/tls-acme") }} + {{- true -}} +{{- end -}} +{{- end -}} diff --git a/charts/modules/minio/charts/common/templates/_labels.tpl b/charts/modules/minio/charts/common/templates/_labels.tpl new file mode 100644 index 0000000..252066c --- /dev/null +++ b/charts/modules/minio/charts/common/templates/_labels.tpl @@ -0,0 +1,18 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Kubernetes standard labels +*/}} +{{- define "common.labels.standard" -}} +app.kubernetes.io/name: {{ include "common.names.name" . }} +helm.sh/chart: {{ include "common.names.chart" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end -}} + +{{/* +Labels to use on deploy.spec.selector.matchLabels and svc.spec.selector +*/}} +{{- define "common.labels.matchLabels" -}} +app.kubernetes.io/name: {{ include "common.names.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end -}} diff --git a/charts/modules/minio/charts/common/templates/_names.tpl b/charts/modules/minio/charts/common/templates/_names.tpl new file mode 100644 index 0000000..1bdac8b --- /dev/null +++ b/charts/modules/minio/charts/common/templates/_names.tpl @@ -0,0 +1,70 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. 
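+
+Illustrative usage (a sketch, not part of the original file):
+  app.kubernetes.io/name: {{ include "common.names.name" . }}
+It returns .Values.nameOverride when set and .Chart.Name otherwise, truncated to 63 characters.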
+*/}} +{{- define "common.names.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "common.names.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "common.names.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create a default fully qualified dependency name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +Usage: +{{ include "common.names.dependency.fullname" (dict "chartName" "dependency-chart-name" "chartValues" .Values.dependency-chart "context" $) }} +*/}} +{{- define "common.names.dependency.fullname" -}} +{{- if .chartValues.fullnameOverride -}} +{{- .chartValues.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .chartName .chartValues.nameOverride -}} +{{- if contains $name .context.Release.Name -}} +{{- .context.Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .context.Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Allow the release namespace to be overridden for multi-namespace deployments in combined charts. +*/}} +{{- define "common.names.namespace" -}} +{{- if .Values.namespaceOverride -}} +{{- .Values.namespaceOverride -}} +{{- else -}} +{{- .Release.Namespace -}} +{{- end -}} +{{- end -}} + +{{/* +Create a fully qualified app name adding the installation's namespace. +*/}} +{{- define "common.names.fullname.namespace" -}} +{{- printf "%s-%s" (include "common.names.fullname" .) (include "common.names.namespace" .) | trunc 63 | trimSuffix "-" -}} +{{- end -}} diff --git a/charts/modules/minio/charts/common/templates/_secrets.tpl b/charts/modules/minio/charts/common/templates/_secrets.tpl new file mode 100644 index 0000000..4267d42 --- /dev/null +++ b/charts/modules/minio/charts/common/templates/_secrets.tpl @@ -0,0 +1,165 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Generate secret name. + +Usage: +{{ include "common.secrets.name" (dict "existingSecret" .Values.path.to.the.existingSecret "defaultNameSuffix" "mySuffix" "context" $) }} + +Params: + - existingSecret - ExistingSecret/String - Optional. The path to the existing secrets in the values.yaml given by the user + to be used instead of the default one. Allows for it to be of type String (just the secret name) for backwards compatibility. + +info: https://github.com/bitnami/charts/tree/main/bitnami/common#existingsecret + - defaultNameSuffix - String - Optional. It is used only if we have several secrets in the same deployment. + - context - Dict - Required. The context for the template evaluation. 
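+
+Illustrative call (a sketch; "externalDatabase" and "externaldb" are hypothetical names):
+{{ include "common.secrets.name" (dict "existingSecret" .Values.externalDatabase.existingSecret "defaultNameSuffix" "externaldb" "context" $) }}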
+*/}} +{{- define "common.secrets.name" -}} +{{- $name := (include "common.names.fullname" .context) -}} + +{{- if .defaultNameSuffix -}} +{{- $name = printf "%s-%s" $name .defaultNameSuffix | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- with .existingSecret -}} +{{- if not (typeIs "string" .) -}} +{{- with .name -}} +{{- $name = . -}} +{{- end -}} +{{- else -}} +{{- $name = . -}} +{{- end -}} +{{- end -}} + +{{- printf "%s" $name -}} +{{- end -}} + +{{/* +Generate secret key. + +Usage: +{{ include "common.secrets.key" (dict "existingSecret" .Values.path.to.the.existingSecret "key" "keyName") }} + +Params: + - existingSecret - ExistingSecret/String - Optional. The path to the existing secrets in the values.yaml given by the user + to be used instead of the default one. Allows for it to be of type String (just the secret name) for backwards compatibility. + +info: https://github.com/bitnami/charts/tree/main/bitnami/common#existingsecret + - key - String - Required. Name of the key in the secret. +*/}} +{{- define "common.secrets.key" -}} +{{- $key := .key -}} + +{{- if .existingSecret -}} + {{- if not (typeIs "string" .existingSecret) -}} + {{- if .existingSecret.keyMapping -}} + {{- $key = index .existingSecret.keyMapping $.key -}} + {{- end -}} + {{- end }} +{{- end -}} + +{{- printf "%s" $key -}} +{{- end -}} + +{{/* +Generate secret password or retrieve one if already created. + +Usage: +{{ include "common.secrets.passwords.manage" (dict "secret" "secret-name" "key" "keyName" "providedValues" (list "path.to.password1" "path.to.password2") "length" 10 "strong" false "chartName" "chartName" "context" $) }} + +Params: + - secret - String - Required - Name of the 'Secret' resource where the password is stored. + - key - String - Required - Name of the key in the secret. + - providedValues - List - Required - The path to the validating value in the values.yaml, e.g: "mysql.password". Will pick first parameter with a defined value. + - length - int - Optional - Length of the generated random password. + - strong - Boolean - Optional - Whether to add symbols to the generated random password. + - chartName - String - Optional - Name of the chart used when said chart is deployed as a subchart. + - context - Context - Required - Parent context. + +The order in which this function returns a secret password: + 1. Already existing 'Secret' resource + (If a 'Secret' resource is found under the name provided to the 'secret' parameter to this function and that 'Secret' resource contains a key with the name passed as the 'key' parameter to this function then the value of this existing secret password will be returned) + 2. Password provided via the values.yaml + (If one of the keys passed to the 'providedValues' parameter to this function is a valid path to a key in the values.yaml and has a value, the value of the first key with a value will be returned) + 3. 
Randomly generated secret password + (A new random secret password with the length specified in the 'length' parameter will be generated and returned) + +*/}} +{{- define "common.secrets.passwords.manage" -}} + +{{- $password := "" }} +{{- $subchart := "" }} +{{- $chartName := default "" .chartName }} +{{- $passwordLength := default 10 .length }} +{{- $providedPasswordKey := include "common.utils.getKeyFromList" (dict "keys" .providedValues "context" $.context) }} +{{- $providedPasswordValue := include "common.utils.getValueFromKey" (dict "key" $providedPasswordKey "context" $.context) }} +{{- $secretData := (lookup "v1" "Secret" $.context.Release.Namespace .secret).data }} +{{- if $secretData }} + {{- if hasKey $secretData .key }} + {{- $password = index $secretData .key | quote }} + {{- else }} + {{- printf "\nPASSWORDS ERROR: The secret \"%s\" does not contain the key \"%s\"\n" .secret .key | fail -}} + {{- end -}} +{{- else if $providedPasswordValue }} + {{- $password = $providedPasswordValue | toString | b64enc | quote }} +{{- else }} + + {{- if .context.Values.enabled }} + {{- $subchart = $chartName }} + {{- end -}} + + {{- $requiredPassword := dict "valueKey" $providedPasswordKey "secret" .secret "field" .key "subchart" $subchart "context" $.context -}} + {{- $requiredPasswordError := include "common.validations.values.single.empty" $requiredPassword -}} + {{- $passwordValidationErrors := list $requiredPasswordError -}} + {{- include "common.errors.upgrade.passwords.empty" (dict "validationErrors" $passwordValidationErrors "context" $.context) -}} + + {{- if .strong }} + {{- $subStr := list (lower (randAlpha 1)) (randNumeric 1) (upper (randAlpha 1)) | join "_" }} + {{- $password = randAscii $passwordLength }} + {{- $password = regexReplaceAllLiteral "\\W" $password "@" | substr 5 $passwordLength }} + {{- $password = printf "%s%s" $subStr $password | toString | shuffle | b64enc | quote }} + {{- else }} + {{- $password = randAlphaNum $passwordLength | b64enc | quote }} + {{- end }} +{{- end -}} +{{- printf "%s" $password -}} +{{- end -}} + +{{/* +Reuses the value from an existing secret, otherwise sets its value to a default value. + +Usage: +{{ include "common.secrets.lookup" (dict "secret" "secret-name" "key" "keyName" "defaultValue" .Values.myValue "context" $) }} + +Params: + - secret - String - Required - Name of the 'Secret' resource where the password is stored. + - key - String - Required - Name of the key in the secret. + - defaultValue - String - Required - The path to the validating value in the values.yaml, e.g: "mysql.password". Will pick first parameter with a defined value. + - context - Context - Required - Parent context. + +*/}} +{{- define "common.secrets.lookup" -}} +{{- $value := "" -}} +{{- $defaultValue := required "\n'common.secrets.lookup': Argument 'defaultValue' missing or empty" .defaultValue -}} +{{- $secretData := (lookup "v1" "Secret" $.context.Release.Namespace .secret).data -}} +{{- if and $secretData (hasKey $secretData .key) -}} + {{- $value = index $secretData .key -}} +{{- else -}} + {{- $value = $defaultValue | toString | b64enc -}} +{{- end -}} +{{- printf "%s" $value -}} +{{- end -}} + +{{/* +Returns whether a previous generated secret already exists + +Usage: +{{ include "common.secrets.exists" (dict "secret" "secret-name" "context" $) }} + +Params: + - secret - String - Required - Name of the 'Secret' resource where the password is stored. + - context - Context - Required - Parent context. 
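+
+Illustrative usage (a sketch, not part of the original file):
+{{- if not (include "common.secrets.exists" (dict "secret" (include "common.names.fullname" .) "context" $)) }}
+# ...render a new Secret manifest here, since the lookup found none...
+{{- end }}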
+*/}} +{{- define "common.secrets.exists" -}} +{{- $secret := (lookup "v1" "Secret" $.context.Release.Namespace .secret) }} +{{- if $secret }} + {{- true -}} +{{- end -}} +{{- end -}} diff --git a/charts/modules/minio/charts/common/templates/_storage.tpl b/charts/modules/minio/charts/common/templates/_storage.tpl new file mode 100644 index 0000000..60e2a84 --- /dev/null +++ b/charts/modules/minio/charts/common/templates/_storage.tpl @@ -0,0 +1,23 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Return the proper Storage Class +{{ include "common.storage.class" ( dict "persistence" .Values.path.to.the.persistence "global" $) }} +*/}} +{{- define "common.storage.class" -}} + +{{- $storageClass := .persistence.storageClass -}} +{{- if .global -}} + {{- if .global.storageClass -}} + {{- $storageClass = .global.storageClass -}} + {{- end -}} +{{- end -}} + +{{- if $storageClass -}} + {{- if (eq "-" $storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" $storageClass -}} + {{- end -}} +{{- end -}} + +{{- end -}} diff --git a/charts/modules/minio/charts/common/templates/_tplvalues.tpl b/charts/modules/minio/charts/common/templates/_tplvalues.tpl new file mode 100644 index 0000000..2db1668 --- /dev/null +++ b/charts/modules/minio/charts/common/templates/_tplvalues.tpl @@ -0,0 +1,13 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Renders a value that contains template. +Usage: +{{ include "common.tplvalues.render" ( dict "value" .Values.path.to.the.Value "context" $) }} +*/}} +{{- define "common.tplvalues.render" -}} + {{- if typeIs "string" .value }} + {{- tpl .value .context }} + {{- else }} + {{- tpl (.value | toYaml) .context }} + {{- end }} +{{- end -}} diff --git a/charts/modules/minio/charts/common/templates/_utils.tpl b/charts/modules/minio/charts/common/templates/_utils.tpl new file mode 100644 index 0000000..8c22b2a --- /dev/null +++ b/charts/modules/minio/charts/common/templates/_utils.tpl @@ -0,0 +1,62 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Print instructions to get a secret value. +Usage: +{{ include "common.utils.secret.getvalue" (dict "secret" "secret-name" "field" "secret-value-field" "context" $) }} +*/}} +{{- define "common.utils.secret.getvalue" -}} +{{- $varname := include "common.utils.fieldToEnvVar" . -}} +export {{ $varname }}=$(kubectl get secret --namespace {{ .context.Release.Namespace | quote }} {{ .secret }} -o jsonpath="{.data.{{ .field }}}" | base64 -d) +{{- end -}} + +{{/* +Build env var name given a field +Usage: +{{ include "common.utils.fieldToEnvVar" dict "field" "my-password" }} +*/}} +{{- define "common.utils.fieldToEnvVar" -}} + {{- $fieldNameSplit := splitList "-" .field -}} + {{- $upperCaseFieldNameSplit := list -}} + + {{- range $fieldNameSplit -}} + {{- $upperCaseFieldNameSplit = append $upperCaseFieldNameSplit ( upper . ) -}} + {{- end -}} + + {{ join "_" $upperCaseFieldNameSplit }} +{{- end -}} + +{{/* +Gets a value from .Values given +Usage: +{{ include "common.utils.getValueFromKey" (dict "key" "path.to.key" "context" $) }} +*/}} +{{- define "common.utils.getValueFromKey" -}} +{{- $splitKey := splitList "." .key -}} +{{- $value := "" -}} +{{- $latestObj := $.context.Values -}} +{{- range $splitKey -}} + {{- if not $latestObj -}} + {{- printf "please review the entire path of '%s' exists in values" $.key | fail -}} + {{- end -}} + {{- $value = ( index $latestObj . 
) -}} + {{- $latestObj = $value -}} +{{- end -}} +{{- printf "%v" (default "" $value) -}} +{{- end -}} + +{{/* +Returns first .Values key with a defined value or first of the list if all non-defined +Usage: +{{ include "common.utils.getKeyFromList" (dict "keys" (list "path.to.key1" "path.to.key2") "context" $) }} +*/}} +{{- define "common.utils.getKeyFromList" -}} +{{- $key := first .keys -}} +{{- $reverseKeys := reverse .keys }} +{{- range $reverseKeys }} + {{- $value := include "common.utils.getValueFromKey" (dict "key" . "context" $.context ) }} + {{- if $value -}} + {{- $key = . }} + {{- end -}} +{{- end -}} +{{- printf "%s" $key -}} +{{- end -}} diff --git a/charts/modules/minio/charts/common/templates/_warnings.tpl b/charts/modules/minio/charts/common/templates/_warnings.tpl new file mode 100644 index 0000000..ae10fa4 --- /dev/null +++ b/charts/modules/minio/charts/common/templates/_warnings.tpl @@ -0,0 +1,14 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Warning about using rolling tag. +Usage: +{{ include "common.warnings.rollingTag" .Values.path.to.the.imageRoot }} +*/}} +{{- define "common.warnings.rollingTag" -}} + +{{- if and (contains "bitnami/" .repository) (not (.tag | toString | regexFind "-r\\d+$|sha256:")) }} +WARNING: Rolling tag detected ({{ .repository }}:{{ .tag }}), please note that it is strongly recommended to avoid using rolling tags in a production environment. ++info https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/ +{{- end }} + +{{- end -}} diff --git a/charts/modules/minio/charts/common/templates/validations/_cassandra.tpl b/charts/modules/minio/charts/common/templates/validations/_cassandra.tpl new file mode 100644 index 0000000..ded1ae3 --- /dev/null +++ b/charts/modules/minio/charts/common/templates/validations/_cassandra.tpl @@ -0,0 +1,72 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate Cassandra required passwords are not empty. + +Usage: +{{ include "common.validations.values.cassandra.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where Cassandra values are stored, e.g: "cassandra-passwords-secret" + - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.cassandra.passwords" -}} + {{- $existingSecret := include "common.cassandra.values.existingSecret" . -}} + {{- $enabled := include "common.cassandra.values.enabled" . -}} + {{- $dbUserPrefix := include "common.cassandra.values.key.dbUser" . -}} + {{- $valueKeyPassword := printf "%s.password" $dbUserPrefix -}} + + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "cassandra-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.cassandra.values.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. 
Default: false +*/}} +{{- define "common.cassandra.values.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.cassandra.dbUser.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.dbUser.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled cassandra. + +Usage: +{{ include "common.cassandra.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.cassandra.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.cassandra.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key dbUser + +Usage: +{{ include "common.cassandra.values.key.dbUser" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false +*/}} +{{- define "common.cassandra.values.key.dbUser" -}} + {{- if .subchart -}} + cassandra.dbUser + {{- else -}} + dbUser + {{- end -}} +{{- end -}} diff --git a/charts/modules/minio/charts/common/templates/validations/_mariadb.tpl b/charts/modules/minio/charts/common/templates/validations/_mariadb.tpl new file mode 100644 index 0000000..b6906ff --- /dev/null +++ b/charts/modules/minio/charts/common/templates/validations/_mariadb.tpl @@ -0,0 +1,103 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate MariaDB required passwords are not empty. + +Usage: +{{ include "common.validations.values.mariadb.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where MariaDB values are stored, e.g: "mysql-passwords-secret" + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.mariadb.passwords" -}} + {{- $existingSecret := include "common.mariadb.values.auth.existingSecret" . -}} + {{- $enabled := include "common.mariadb.values.enabled" . -}} + {{- $architecture := include "common.mariadb.values.architecture" . -}} + {{- $authPrefix := include "common.mariadb.values.key.auth" . 
-}} + {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}} + {{- $valueKeyUsername := printf "%s.username" $authPrefix -}} + {{- $valueKeyPassword := printf "%s.password" $authPrefix -}} + {{- $valueKeyReplicationPassword := printf "%s.replicationPassword" $authPrefix -}} + + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mariadb-root-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}} + + {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }} + {{- if not (empty $valueUsername) -}} + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mariadb-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + {{- end -}} + + {{- if (eq $architecture "replication") -}} + {{- $requiredReplicationPassword := dict "valueKey" $valueKeyReplicationPassword "secret" .secret "field" "mariadb-replication-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredReplicationPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.mariadb.values.auth.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.mariadb.values.auth.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.mariadb.auth.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.auth.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled mariadb. + +Usage: +{{ include "common.mariadb.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.mariadb.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.mariadb.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for architecture + +Usage: +{{ include "common.mariadb.values.architecture" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.mariadb.values.architecture" -}} + {{- if .subchart -}} + {{- .context.Values.mariadb.architecture -}} + {{- else -}} + {{- .context.Values.architecture -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key auth + +Usage: +{{ include "common.mariadb.values.key.auth" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. 
Default: false +*/}} +{{- define "common.mariadb.values.key.auth" -}} + {{- if .subchart -}} + mariadb.auth + {{- else -}} + auth + {{- end -}} +{{- end -}} diff --git a/charts/modules/minio/charts/common/templates/validations/_mongodb.tpl b/charts/modules/minio/charts/common/templates/validations/_mongodb.tpl new file mode 100644 index 0000000..f820ec1 --- /dev/null +++ b/charts/modules/minio/charts/common/templates/validations/_mongodb.tpl @@ -0,0 +1,108 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate MongoDB® required passwords are not empty. + +Usage: +{{ include "common.validations.values.mongodb.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where MongoDB® values are stored, e.g: "mongodb-passwords-secret" + - subchart - Boolean - Optional. Whether MongoDB® is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.mongodb.passwords" -}} + {{- $existingSecret := include "common.mongodb.values.auth.existingSecret" . -}} + {{- $enabled := include "common.mongodb.values.enabled" . -}} + {{- $authPrefix := include "common.mongodb.values.key.auth" . -}} + {{- $architecture := include "common.mongodb.values.architecture" . -}} + {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}} + {{- $valueKeyUsername := printf "%s.username" $authPrefix -}} + {{- $valueKeyDatabase := printf "%s.database" $authPrefix -}} + {{- $valueKeyPassword := printf "%s.password" $authPrefix -}} + {{- $valueKeyReplicaSetKey := printf "%s.replicaSetKey" $authPrefix -}} + {{- $valueKeyAuthEnabled := printf "%s.enabled" $authPrefix -}} + + {{- $authEnabled := include "common.utils.getValueFromKey" (dict "key" $valueKeyAuthEnabled "context" .context) -}} + + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") (eq $authEnabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mongodb-root-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}} + + {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }} + {{- $valueDatabase := include "common.utils.getValueFromKey" (dict "key" $valueKeyDatabase "context" .context) }} + {{- if and $valueUsername $valueDatabase -}} + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mongodb-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + {{- end -}} + + {{- if (eq $architecture "replicaset") -}} + {{- $requiredReplicaSetKey := dict "valueKey" $valueKeyReplicaSetKey "secret" .secret "field" "mongodb-replica-set-key" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredReplicaSetKey -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.mongodb.values.auth.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MongoDb is used as subchart or not. 
Default: false +*/}} +{{- define "common.mongodb.values.auth.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.mongodb.auth.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.auth.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled mongodb. + +Usage: +{{ include "common.mongodb.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.mongodb.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.mongodb.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key auth + +Usage: +{{ include "common.mongodb.values.key.auth" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MongoDB® is used as subchart or not. Default: false +*/}} +{{- define "common.mongodb.values.key.auth" -}} + {{- if .subchart -}} + mongodb.auth + {{- else -}} + auth + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for architecture + +Usage: +{{ include "common.mongodb.values.architecture" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MongoDB® is used as subchart or not. Default: false +*/}} +{{- define "common.mongodb.values.architecture" -}} + {{- if .subchart -}} + {{- .context.Values.mongodb.architecture -}} + {{- else -}} + {{- .context.Values.architecture -}} + {{- end -}} +{{- end -}} diff --git a/charts/modules/minio/charts/common/templates/validations/_mysql.tpl b/charts/modules/minio/charts/common/templates/validations/_mysql.tpl new file mode 100644 index 0000000..74472a0 --- /dev/null +++ b/charts/modules/minio/charts/common/templates/validations/_mysql.tpl @@ -0,0 +1,103 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate MySQL required passwords are not empty. + +Usage: +{{ include "common.validations.values.mysql.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where MySQL values are stored, e.g: "mysql-passwords-secret" + - subchart - Boolean - Optional. Whether MySQL is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.mysql.passwords" -}} + {{- $existingSecret := include "common.mysql.values.auth.existingSecret" . -}} + {{- $enabled := include "common.mysql.values.enabled" . -}} + {{- $architecture := include "common.mysql.values.architecture" . -}} + {{- $authPrefix := include "common.mysql.values.key.auth" . 
-}} + {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}} + {{- $valueKeyUsername := printf "%s.username" $authPrefix -}} + {{- $valueKeyPassword := printf "%s.password" $authPrefix -}} + {{- $valueKeyReplicationPassword := printf "%s.replicationPassword" $authPrefix -}} + + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mysql-root-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}} + + {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }} + {{- if not (empty $valueUsername) -}} + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mysql-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + {{- end -}} + + {{- if (eq $architecture "replication") -}} + {{- $requiredReplicationPassword := dict "valueKey" $valueKeyReplicationPassword "secret" .secret "field" "mysql-replication-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredReplicationPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.mysql.values.auth.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MySQL is used as subchart or not. Default: false +*/}} +{{- define "common.mysql.values.auth.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.mysql.auth.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.auth.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled mysql. + +Usage: +{{ include "common.mysql.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.mysql.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.mysql.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for architecture + +Usage: +{{ include "common.mysql.values.architecture" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MySQL is used as subchart or not. Default: false +*/}} +{{- define "common.mysql.values.architecture" -}} + {{- if .subchart -}} + {{- .context.Values.mysql.architecture -}} + {{- else -}} + {{- .context.Values.architecture -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key auth + +Usage: +{{ include "common.mysql.values.key.auth" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MySQL is used as subchart or not. 
Default: false +*/}} +{{- define "common.mysql.values.key.auth" -}} + {{- if .subchart -}} + mysql.auth + {{- else -}} + auth + {{- end -}} +{{- end -}} diff --git a/charts/modules/minio/charts/common/templates/validations/_postgresql.tpl b/charts/modules/minio/charts/common/templates/validations/_postgresql.tpl new file mode 100644 index 0000000..164ec0d --- /dev/null +++ b/charts/modules/minio/charts/common/templates/validations/_postgresql.tpl @@ -0,0 +1,129 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate PostgreSQL required passwords are not empty. + +Usage: +{{ include "common.validations.values.postgresql.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where postgresql values are stored, e.g: "postgresql-passwords-secret" + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.postgresql.passwords" -}} + {{- $existingSecret := include "common.postgresql.values.existingSecret" . -}} + {{- $enabled := include "common.postgresql.values.enabled" . -}} + {{- $valueKeyPostgresqlPassword := include "common.postgresql.values.key.postgressPassword" . -}} + {{- $valueKeyPostgresqlReplicationEnabled := include "common.postgresql.values.key.replicationPassword" . -}} + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + {{- $requiredPostgresqlPassword := dict "valueKey" $valueKeyPostgresqlPassword "secret" .secret "field" "postgresql-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPostgresqlPassword -}} + + {{- $enabledReplication := include "common.postgresql.values.enabled.replication" . -}} + {{- if (eq $enabledReplication "true") -}} + {{- $requiredPostgresqlReplicationPassword := dict "valueKey" $valueKeyPostgresqlReplicationEnabled "secret" .secret "field" "postgresql-replication-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPostgresqlReplicationPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to decide whether evaluate global values. + +Usage: +{{ include "common.postgresql.values.use.global" (dict "key" "key-of-global" "context" $) }} +Params: + - key - String - Required. Field to be evaluated within global, e.g: "existingSecret" +*/}} +{{- define "common.postgresql.values.use.global" -}} + {{- if .context.Values.global -}} + {{- if .context.Values.global.postgresql -}} + {{- index .context.Values.global.postgresql .key | quote -}} + {{- end -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.postgresql.values.existingSecret" (dict "context" $) }} +*/}} +{{- define "common.postgresql.values.existingSecret" -}} + {{- $globalValue := include "common.postgresql.values.use.global" (dict "key" "existingSecret" "context" .context) -}} + + {{- if .subchart -}} + {{- default (.context.Values.postgresql.existingSecret | quote) $globalValue -}} + {{- else -}} + {{- default (.context.Values.existingSecret | quote) $globalValue -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled postgresql. 
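+(Sketch note, not part of the original file: with "subchart" set this resolves
+.context.Values.postgresql.enabled; otherwise it returns the negation of
+.context.Values.enabled.)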
+ +Usage: +{{ include "common.postgresql.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.postgresql.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.postgresql.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key postgressPassword. + +Usage: +{{ include "common.postgresql.values.key.postgressPassword" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.postgresql.values.key.postgressPassword" -}} + {{- $globalValue := include "common.postgresql.values.use.global" (dict "key" "postgresqlUsername" "context" .context) -}} + + {{- if not $globalValue -}} + {{- if .subchart -}} + postgresql.postgresqlPassword + {{- else -}} + postgresqlPassword + {{- end -}} + {{- else -}} + global.postgresql.postgresqlPassword + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled.replication. + +Usage: +{{ include "common.postgresql.values.enabled.replication" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.postgresql.values.enabled.replication" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.postgresql.replication.enabled -}} + {{- else -}} + {{- printf "%v" .context.Values.replication.enabled -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key replication.password. + +Usage: +{{ include "common.postgresql.values.key.replicationPassword" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.postgresql.values.key.replicationPassword" -}} + {{- if .subchart -}} + postgresql.replication.password + {{- else -}} + replication.password + {{- end -}} +{{- end -}} diff --git a/charts/modules/minio/charts/common/templates/validations/_redis.tpl b/charts/modules/minio/charts/common/templates/validations/_redis.tpl new file mode 100644 index 0000000..dcccfc1 --- /dev/null +++ b/charts/modules/minio/charts/common/templates/validations/_redis.tpl @@ -0,0 +1,76 @@ + +{{/* vim: set filetype=mustache: */}} +{{/* +Validate Redis® required passwords are not empty. + +Usage: +{{ include "common.validations.values.redis.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where redis values are stored, e.g: "redis-passwords-secret" + - subchart - Boolean - Optional. Whether redis is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.redis.passwords" -}} + {{- $enabled := include "common.redis.values.enabled" . -}} + {{- $valueKeyPrefix := include "common.redis.values.keys.prefix" . -}} + {{- $standarizedVersion := include "common.redis.values.standarized.version" . 
}} + + {{- $existingSecret := ternary (printf "%s%s" $valueKeyPrefix "auth.existingSecret") (printf "%s%s" $valueKeyPrefix "existingSecret") (eq $standarizedVersion "true") }} + {{- $existingSecretValue := include "common.utils.getValueFromKey" (dict "key" $existingSecret "context" .context) }} + + {{- $valueKeyRedisPassword := ternary (printf "%s%s" $valueKeyPrefix "auth.password") (printf "%s%s" $valueKeyPrefix "password") (eq $standarizedVersion "true") }} + {{- $valueKeyRedisUseAuth := ternary (printf "%s%s" $valueKeyPrefix "auth.enabled") (printf "%s%s" $valueKeyPrefix "usePassword") (eq $standarizedVersion "true") }} + + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $useAuth := include "common.utils.getValueFromKey" (dict "key" $valueKeyRedisUseAuth "context" .context) -}} + {{- if eq $useAuth "true" -}} + {{- $requiredRedisPassword := dict "valueKey" $valueKeyRedisPassword "secret" .secret "field" "redis-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRedisPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled redis. + +Usage: +{{ include "common.redis.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.redis.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.redis.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right prefix path for the values. + +Usage: +{{ include "common.redis.values.keys.prefix" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether redis is used as subchart or not. Default: false +*/}} +{{- define "common.redis.values.keys.prefix" -}} + {{- if .subchart -}}redis.{{- else -}}{{- end -}} +{{- end -}} + +{{/* +Checks whether the redis chart includes the standardizations (version >= 14) + +Usage: +{{ include "common.redis.values.standarized.version" (dict "context" $) }} +*/}} +{{- define "common.redis.values.standarized.version" -}} + + {{- $standarizedAuth := printf "%s%s" (include "common.redis.values.keys.prefix" .) "auth" -}} + {{- $standarizedAuthValues := include "common.utils.getValueFromKey" (dict "key" $standarizedAuth "context" .context) }} + + {{- if $standarizedAuthValues -}} + {{- true -}} + {{- end -}} +{{- end -}} diff --git a/charts/modules/minio/charts/common/templates/validations/_validations.tpl b/charts/modules/minio/charts/common/templates/validations/_validations.tpl new file mode 100644 index 0000000..9a814cf --- /dev/null +++ b/charts/modules/minio/charts/common/templates/validations/_validations.tpl @@ -0,0 +1,46 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate values must not be empty. + +Usage: +{{- $validateValueConf00 := (dict "valueKey" "path.to.value" "secret" "secretName" "field" "password-00") -}} +{{- $validateValueConf01 := (dict "valueKey" "path.to.value" "secret" "secretName" "field" "password-01") -}} +{{ include "common.validations.values.multiple.empty" (dict "required" (list $validateValueConf00 $validateValueConf01) "context" $) }} + +Validate value params: + - valueKey - String - Required. The path to the validating value in the values.yaml, e.g: "mysql.password" + - secret - String - Optional.
Name of the secret where the validating value is generated/stored, e.g: "mysql-passwords-secret" + - field - String - Optional. Name of the field in the secret data, e.g: "mysql-password" +*/}} +{{- define "common.validations.values.multiple.empty" -}} + {{- range .required -}} + {{- include "common.validations.values.single.empty" (dict "valueKey" .valueKey "secret" .secret "field" .field "context" $.context) -}} + {{- end -}} +{{- end -}} + +{{/* +Validate a value must not be empty. + +Usage: +{{ include "common.validations.values.single.empty" (dict "valueKey" "mariadb.password" "secret" "secretName" "field" "my-password" "subchart" "subchart" "context" $) }} + +Validate value params: + - valueKey - String - Required. The path to the validating value in the values.yaml, e.g: "mysql.password" + - secret - String - Optional. Name of the secret where the validating value is generated/stored, e.g: "mysql-passwords-secret" + - field - String - Optional. Name of the field in the secret data, e.g: "mysql-password" + - subchart - String - Optional. Name of the subchart that the validated password is part of. +*/}} +{{- define "common.validations.values.single.empty" -}} + {{- $value := include "common.utils.getValueFromKey" (dict "key" .valueKey "context" .context) }} + {{- $subchart := ternary "" (printf "%s." .subchart) (empty .subchart) }} + + {{- if not $value -}} + {{- $varname := "my-value" -}} + {{- $getCurrentValue := "" -}} + {{- if and .secret .field -}} + {{- $varname = include "common.utils.fieldToEnvVar" . -}} + {{- $getCurrentValue = printf " To get the current value:\n\n %s\n" (include "common.utils.secret.getvalue" .) -}} + {{- end -}} + {{- printf "\n '%s' must not be empty, please add '--set %s%s=$%s' to the command.%s" .valueKey $subchart .valueKey $varname $getCurrentValue -}} + {{- end -}} +{{- end -}} diff --git a/charts/modules/minio/charts/common/values.yaml b/charts/modules/minio/charts/common/values.yaml new file mode 100644 index 0000000..f2df68e --- /dev/null +++ b/charts/modules/minio/charts/common/values.yaml @@ -0,0 +1,5 @@ +## bitnami/common +## It is required by CI/CD tools and processes. +## @skip exampleValue +## +exampleValue: common-chart diff --git a/charts/modules/minio/templates/NOTES.txt b/charts/modules/minio/templates/NOTES.txt new file mode 100644 index 0000000..f1b9c81 --- /dev/null +++ b/charts/modules/minio/templates/NOTES.txt @@ -0,0 +1,91 @@ +CHART NAME: {{ .Chart.Name }} +CHART VERSION: {{ .Chart.Version }} +APP VERSION: {{ .Chart.AppVersion }} + +** Please be patient while the chart is being deployed ** + +{{- if .Values.gateway.enabled }} +MinIO® deployed as a {{ upper .Values.gateway.type }} Gateway +{{- end }} + +MinIO® can be accessed via port {{ .Values.service.ports.api }} on the following DNS name from within your cluster: + + {{ include "common.names.fullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} + +To get your credentials, run: + + export ROOT_USER=$(kubectl get secret --namespace {{ .Release.Namespace }} {{ include "minio.secretName" . }} -o jsonpath="{.data.root-user}" | base64 -d) + export ROOT_PASSWORD=$(kubectl get secret --namespace {{ .Release.Namespace }} {{ include "minio.secretName" . }} -o jsonpath="{.data.root-password}" | base64 -d) + +To connect to your MinIO® server using a client: + +- Run a MinIO® Client pod and append the desired command (e.g. 'admin info'): + + kubectl run --namespace {{ .Release.Namespace }} {{ include "common.names.fullname" .
}}-client \ + --rm --tty -i --restart='Never' \ + --env MINIO_SERVER_ROOT_USER=$ROOT_USER \ + --env MINIO_SERVER_ROOT_PASSWORD=$ROOT_PASSWORD \ + --env MINIO_SERVER_HOST={{ include "common.names.fullname" . }} \ + {{- if and (.Values.networkPolicy.enabled) (not .Values.networkPolicy.allowExternal) }} + --labels="{{ include "common.names.fullname" . }}-client=true" \ + {{- end }} + --image {{ template "minio.clientImage" . }} -- admin info minio + +{{- if and (.Values.networkPolicy.enabled) (not .Values.networkPolicy.allowExternal) }} + + NOTE: Since NetworkPolicy is enabled, only pods with label + "{{ template "common.names.fullname" . }}-client=true" will be able to connect to MinIO®. + +{{- end }} +{{- if or .Values.gateway.enabled (not .Values.disableWebUI) }} + +To access the MinIO® web UI: + +- Get the MinIO® URL: + +{{- if .Values.ingress.enabled }} + + You should be able to access your new MinIO® web UI through + + {{ if .Values.ingress.tls }}https{{ else }}http{{ end }}://{{ .Values.ingress.hostname }}/minio/ +{{- else if contains "LoadBalancer" .Values.service.type }} + + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + Watch the status with: 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ include "common.names.fullname" . }}' + + {{- $port:=.Values.service.ports.console | toString }} + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "common.names.fullname" . }} --template "{{ "{{ range (index .status.loadBalancer.ingress 0) }}{{ . }}{{ end }}" }}") + echo "MinIO® web URL: http://$SERVICE_IP{{- if ne $port "80" }}:{{ .Values.service.ports.console }}{{ end }}/minio" + +{{- else if contains "ClusterIP" .Values.service.type }} + + echo "MinIO® web URL: http://127.0.0.1:{{ .Values.containerPorts.console }}/minio" + kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ include "common.names.fullname" . }} {{ .Values.containerPorts.console }}:{{ .Values.service.ports.console }} + +{{- else if contains "NodePort" .Values.service.type }} + + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "common.names.fullname" . }}) + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo "MinIO® web URL: http://$NODE_IP:$NODE_PORT/minio" + +{{- end }} +{{- else }} + + WARN: MinIO® Web UI is disabled. +{{- end }} + +{{- include "common.warnings.rollingTag" .Values.image }} +{{- include "common.warnings.rollingTag" .Values.clientImage }} +{{- include "common.warnings.rollingTag" .Values.volumePermissions.image }} +{{- include "minio.validateValues" . }} + +{{- $requiredPasswords := list -}} +{{- $secretName := include "minio.secretName" . 
-}} +{{- if and (not .Values.auth.existingSecret) (not .Values.auth.forceNewKeys) (not .Values.gateway.enabled) -}} + {{- $requiredRootUser := dict "valueKey" "auth.rootUser" "secret" $secretName "field" "root-user" -}} + {{- $requiredRootPassword := dict "valueKey" "auth.rootPassword" "secret" $secretName "field" "root-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRootUser -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}} +{{- end -}} +{{- $requiredMinioPasswordErrors := include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" $) -}} +{{- include "common.errors.upgrade.passwords.empty" (dict "validationErrors" (list $requiredMinioPasswordErrors) "context" $) -}} diff --git a/charts/modules/minio/templates/_helpers.tpl b/charts/modules/minio/templates/_helpers.tpl new file mode 100644 index 0000000..66b34ed --- /dev/null +++ b/charts/modules/minio/templates/_helpers.tpl @@ -0,0 +1,316 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Return the proper MinIO® image name +*/}} +{{- define "minio.image" -}} +{{ include "common.images.image" (dict "imageRoot" .Values.image "global" .Values.global) }} + +{{- end -}} + +{{/* +Return the proper MinIO® Client image name +*/}} +{{- define "minio.clientImage" -}} +{{ include "common.images.image" (dict "imageRoot" .Values.clientImage "global" .Values.global) }} +{{- end -}} + +{{/* +Return the proper image name (for the init container volume-permissions image) +*/}} +{{- define "minio.volumePermissions.image" -}} +{{ include "common.images.image" (dict "imageRoot" .Values.volumePermissions.image "global" .Values.global) }} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names +*/}} +{{- define "minio.imagePullSecrets" -}} +{{- include "common.images.pullSecrets" (dict "images" (list .Values.image .Values.clientImage .Values.volumePermissions.image) "global" .Values.global) -}} +{{- end -}} + +{{/* +Returns the available value for certain key in an existing secret (if it exists), +otherwise it generates a random value. +*/}} +{{- define "getValueFromSecret" }} +{{- $len := (default 16 .Length) | int -}} +{{- $obj := (lookup "v1" "Secret" .Namespace .Name).data -}} +{{- if $obj }} +{{- index $obj .Key | b64dec -}} +{{- else -}} +{{- randAlphaNum $len -}} +{{- end -}} +{{- end }} + +{{/* +Get the user to use to access MinIO® +*/}} +{{- define "minio.secret.userValue" -}} +{{- if .Values.gateway.enabled }} + {{- if eq .Values.gateway.type "azure" }} + {{- if .Values.gateway.auth.azure.accessKey }} + {{- .Values.gateway.auth.azure.accessKey -}} + {{- else -}} + {{- include "getValueFromSecret" (dict "Namespace" .Release.Namespace "Name" (include "common.names.fullname" .) "Length" 10 "Key" "root-user") -}} + {{- end -}} + {{- else if eq .Values.gateway.type "gcs" }} + {{- if .Values.gateway.auth.gcs.accessKey }} + {{- .Values.gateway.auth.gcs.accessKey -}} + {{- else -}} + {{- include "getValueFromSecret" (dict "Namespace" .Release.Namespace "Name" (include "common.names.fullname" .) "Length" 10 "Key" "root-user") -}} + {{- end -}} + {{- else if eq .Values.gateway.type "nas" }} + {{- if .Values.gateway.auth.nas.accessKey }} + {{- .Values.gateway.auth.nas.accessKey -}} + {{- else -}} + {{- include "getValueFromSecret" (dict "Namespace" .Release.Namespace "Name" (include "common.names.fullname" .) 
"Length" 10 "Key" "root-user") -}} + {{- end -}} + {{- else if and (eq .Values.gateway.type "s3") (not .Values.gateway.auth.s3.useIRSA ) }} + {{- .Values.gateway.auth.s3.accessKey -}} + {{- end -}} +{{- else }} + {{- if .Values.auth.rootUser }} + {{- .Values.auth.rootUser -}} + {{- else if (not .Values.auth.forcePassword) }} + {{- include "getValueFromSecret" (dict "Namespace" .Release.Namespace "Name" (include "common.names.fullname" .) "Length" 10 "Key" "root-user") -}} + {{- else -}} + {{ required "A root username is required!" .Values.auth.rootUser }} + {{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Get the password to use to access MinIO® +*/}} +{{- define "minio.secret.passwordValue" -}} +{{- if .Values.gateway.enabled }} + {{- if eq .Values.gateway.type "azure" }} + {{- if .Values.gateway.auth.azure.secretKey }} + {{- .Values.gateway.auth.azure.secretKey -}} + {{- else -}} + {{- include "getValueFromSecret" (dict "Namespace" .Release.Namespace "Name" (include "common.names.fullname" .) "Length" 10 "Key" "root-password") -}} + {{- end -}} + {{- else if eq .Values.gateway.type "gcs" }} + {{- if .Values.gateway.auth.gcs.secretKey }} + {{- .Values.gateway.auth.gcs.secretKey -}} + {{- else -}} + {{- include "getValueFromSecret" (dict "Namespace" .Release.Namespace "Name" (include "common.names.fullname" .) "Length" 10 "Key" "root-password") -}} + {{- end -}} + {{- else if eq .Values.gateway.type "nas" }} + {{- if .Values.gateway.auth.nas.secretKey }} + {{- .Values.gateway.auth.nas.secretKey -}} + {{- else -}} + {{- include "getValueFromSecret" (dict "Namespace" .Release.Namespace "Name" (include "common.names.fullname" .) "Length" 10 "Key" "root-password") -}} + {{- end -}} + {{- else if and (eq .Values.gateway.type "s3") (not .Values.gateway.auth.s3.useIRSA ) }} + {{- .Values.gateway.auth.s3.secretKey -}} + {{- end -}} +{{- else }} + {{- if .Values.auth.rootPassword }} + {{- .Values.auth.rootPassword -}} + {{- else if (not .Values.auth.forcePassword) }} + {{- include "getValueFromSecret" (dict "Namespace" .Release.Namespace "Name" (include "common.names.fullname" .) "Length" 10 "Key" "root-password") -}} + {{- else -}} + {{ required "A root password is required!" .Values.auth.rootPassword }} + {{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Get the credentials secret. +*/}} +{{- define "minio.secretName" -}} +{{- if .Values.auth.existingSecret -}} + {{- printf "%s" (tpl .Values.auth.existingSecret $) -}} +{{- else -}} + {{- printf "%s" (include "common.names.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if a secret object should be created +*/}} +{{- define "minio.createSecret" -}} +{{- if .Values.auth.existingSecret -}} +{{- else -}} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if a PVC object should be created (only in standalone mode) +*/}} +{{- define "minio.createPVC" -}} +{{- if and .Values.persistence.enabled (not .Values.persistence.existingClaim) (or (and (eq .Values.mode "standalone") (not .Values.gateway.enabled)) (and .Values.gateway.enabled (eq .Values.gateway.type "nas"))) }} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Return the PVC name (only in standalone mode) +*/}} +{{- define "minio.claimName" -}} +{{- if and .Values.persistence.existingClaim }} + {{- printf "%s" (tpl .Values.persistence.existingClaim $) -}} +{{- else -}} + {{- printf "%s" (include "common.names.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Returns the proper service account name depending if an explicit service account name is set +in the values file. 
If the name is not set, it defaults to common.names.fullname when serviceAccount.create +is true, or to "default" otherwise. +*/}} +{{- define "minio.serviceAccountName" -}} + {{- if .Values.serviceAccount.create -}} + {{ default (include "common.names.fullname" .) .Values.serviceAccount.name }} + {{- else -}} + {{ default "default" .Values.serviceAccount.name }} + {{- end -}} +{{- end -}} + +{{/* +Compile all warnings into a single message, and call fail. +*/}} +{{- define "minio.validateValues" -}} +{{- $messages := list -}} +{{- $messages := append $messages (include "minio.validateValues.mode" .) -}} +{{- $messages := append $messages (include "minio.validateValues.totalDrives" .) -}} +{{- $messages := append $messages (include "minio.validateValues.tls" .) -}} +{{- $messages := append $messages (include "minio.validateValues.gateway.type" .) -}} +{{- $messages := append $messages (include "minio.validateValues.gateway.azure.credentials" .) -}} +{{- $messages := append $messages (include "minio.validateValues.gateway.gcs.projectID" .) -}} +{{- $messages := append $messages (include "minio.validateValues.gateway.nas.persistence" .) -}} +{{- $messages := append $messages (include "minio.validateValues.gateway.s3.credentials" .) -}} +{{- $messages := without $messages "" -}} +{{- $message := join "\n" $messages -}} + +{{- if $message -}} +{{- printf "\nVALUES VALIDATION:\n%s" $message | fail -}} +{{- end -}} +{{- end -}} + +{{/* +Validate values of MinIO® - must provide a valid mode ("distributed" or "standalone") +*/}} +{{- define "minio.validateValues.mode" -}} +{{- $allowedValues := list "distributed" "standalone" }} +{{- if not (has .Values.mode $allowedValues) -}} +minio: mode + Invalid mode selected. Valid values are "distributed" and + "standalone". Please set a valid mode (--set mode="xxxx") +{{- end -}} +{{- end -}} + +{{/* +Validate values of MinIO® - total number of drives should be at least 4 +*/}} +{{- define "minio.validateValues.totalDrives" -}} +{{- $replicaCount := int .Values.statefulset.replicaCount }} +{{- $drivesPerNode := int .Values.statefulset.drivesPerNode }} +{{- $totalDrives := mul $replicaCount $drivesPerNode }} +{{- if and (eq .Values.mode "distributed") (lt $totalDrives 4) -}} +minio: total drives + The total number of drives should be at least 4 to guarantee erasure coding! + Please set a combination of nodes and drives per node that matches this condition. + For instance (--set statefulset.replicaCount=2 --set statefulset.drivesPerNode=2) +{{- end -}} +{{- end -}} + +{{/* +Validate values of MinIO® - a TLS secret must be provided if TLS is enabled +*/}} +{{- define "minio.validateValues.tls" -}} +{{- if and .Values.tls.enabled (not .Values.tls.existingSecret) (not .Values.tls.autoGenerated) }} +minio: tls.existingSecret, tls.autoGenerated + In order to enable TLS, you also need to provide + an existing secret containing the TLS certificates or + enable auto-generated certificates. +{{- end -}} +{{- end -}} + +{{/* +Validate values of MinIO® - must provide a valid gateway type ("azure", "gcs", "nas" or "s3") +*/}} +{{- define "minio.validateValues.gateway.type" -}} +{{- $allowedValues := list "azure" "gcs" "nas" "s3" }} +{{- if and .Values.gateway.enabled (not (has .Values.gateway.type $allowedValues)) -}} +minio: gateway.type + Invalid Gateway type. Valid values are "azure", "gcs", "nas" and "s3".
+ Please set a valid mode (--set gateway.type="xxxx") +{{- end -}} +{{- end -}} + +{{/* +Validate values of MinIO® - when using MinIO® as an Azure Gateway, the StorageAccount Name/Key are required +*/}} +{{- define "minio.validateValues.gateway.azure.credentials" -}} +{{- if and .Values.gateway.enabled (eq .Values.gateway.type "azure") (or (empty .Values.gateway.auth.azure.storageAccountName) (empty .Values.gateway.auth.azure.storageAccountKey)) (or (empty .Values.gateway.auth.azure.storageAccountNameExistingSecret) (empty .Values.gateway.auth.azure.storageAccountNameExistingSecretKey) (empty .Values.gateway.auth.azure.storageAccountKeyExistingSecret) (empty .Values.gateway.auth.azure.storageAccountKeyExistingSecretKey)) }} +minio: gateway.auth.azure + The StorageAccount name and key are required to use MinIO® as an Azure Gateway. + Please set valid StorageAccount information (--set gateway.auth.azure.storageAccountName="xxxx",gateway.auth.azure.storageAccountKey="yyyy") + Alternatively, specify existing secrets from which to extract the StorageAccount name and key: + --set gateway.auth.azure.storageAccountNameExistingSecret="xxxx", --set gateway.auth.azure.storageAccountNameExistingSecretKey="yyyy", + --set gateway.auth.azure.storageAccountKeyExistingSecret="aaaa", --set gateway.auth.azure.storageAccountKeyExistingSecretKey="bbbb" +{{- end -}} +{{- end -}} + +{{/* +Validate values of MinIO® - when using MinIO® as a GCS Gateway, the GCP project ID is required +*/}} +{{- define "minio.validateValues.gateway.gcs.projectID" -}} +{{- if and .Values.gateway.enabled (eq .Values.gateway.type "gcs") (empty .Values.gateway.auth.gcs.projectID) }} +minio: gateway.auth.gcs.projectID + A GCP project ID is required to use MinIO® as a GCS Gateway. + Please set a valid project ID (--set gateway.auth.gcs.projectID="xxxx") +{{- end -}} +{{- end -}} + +{{/* +Validate values of MinIO® - when using MinIO® as a NAS Gateway, ReadWriteMany volumes are required +*/}} +{{- define "minio.validateValues.gateway.nas.persistence" -}} +{{- $replicaCount := int .Values.gateway.replicaCount }} +{{- if and .Values.gateway.enabled (eq .Values.gateway.type "nas") (gt $replicaCount 1) (not .Values.persistence.enabled) }} +minio: persistence.enabled + ReadWriteMany volumes are required to use MinIO® as a NAS Gateway with N replicas. + Please enable persistence (--set persistence.enabled=true) +{{- else if and .Values.gateway.enabled (eq .Values.gateway.type "nas") (gt $replicaCount 1) (include "minio.createPVC" .) (not (has "ReadWriteMany" .Values.persistence.accessModes)) }} +minio: persistence.accessModes + ReadWriteMany volumes are required to use MinIO® as a NAS Gateway with N replicas. + Please set a valid access mode (--set persistence.accessModes[0]="ReadWriteMany") +{{- end -}} +{{- end -}} + +{{/* +Validate values of MinIO® - when using MinIO® as an S3 Gateway, the Access & Secret keys are required +*/}} +{{- define "minio.validateValues.gateway.s3.credentials" -}} +{{- if and .Values.gateway.enabled (eq .Values.gateway.type "s3") (not .Values.gateway.auth.s3.useIRSA ) (or (empty .Values.gateway.auth.s3.accessKey) (empty .Values.gateway.auth.s3.secretKey)) }} +minio: gateway.auth.s3 + The Access & Secret keys are required to use MinIO® as an S3 Gateway.
+ Please set valid keys (--set gateway.auth.s3.accessKey="xxxx",gateway.auth.s3.secretKey="yyyy") +{{- end -}} +{{- end -}} + +{{/* +Return the secret containing MinIO TLS certificates +*/}} +{{- define "minio.tlsSecretName" -}} +{{- if .Values.tls.existingSecret -}} + {{- printf "%s" (tpl .Values.tls.existingSecret $) -}} +{{- else -}} + {{- printf "%s-crt" (include "common.names.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if a TLS secret object should be created +*/}} +{{- define "minio.createTlsSecret" -}} +{{- if and .Values.tls.enabled .Values.tls.autoGenerated (not .Values.tls.existingSecret) }} + {{- true -}} +{{- end -}} +{{- end -}} diff --git a/charts/modules/minio/templates/api-ingress.yaml b/charts/modules/minio/templates/api-ingress.yaml new file mode 100644 index 0000000..adce0e3 --- /dev/null +++ b/charts/modules/minio/templates/api-ingress.yaml @@ -0,0 +1,60 @@ +{{- if .Values.apiIngress.enabled -}} +apiVersion: {{ include "common.capabilities.ingress.apiVersion" . }} +kind: Ingress +metadata: + name: {{ include "common.names.fullname" . }}-api + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + annotations: + {{- if .Values.apiIngress.annotations }} + {{- include "common.tplvalues.render" (dict "value" .Values.apiIngress.annotations "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + {{- if and .Values.apiIngress.ingressClassName (include "common.ingress.supportsIngressClassname" .) }} + ingressClassName: {{ .Values.apiIngress.ingressClassName | quote }} + {{- end }} + rules: + {{- if .Values.apiIngress.hostname }} + - host: {{ .Values.apiIngress.hostname }} + http: + paths: + {{- if .Values.apiIngress.extraPaths }} + {{- toYaml .Values.apiIngress.extraPaths | nindent 10 }} + {{- end }} + - path: {{ .Values.apiIngress.path }} + {{- if eq "true" (include "common.ingress.supportsPathType" .) }} + pathType: {{ .Values.apiIngress.pathType }} + {{- end }} + backend: {{- include "common.ingress.backend" (dict "serviceName" (include "common.names.fullname" .) 
"servicePort" "minio-api" "context" $) | nindent 14 }} + {{- end }} + {{- range .Values.apiIngress.extraHosts }} + - host: {{ .name | quote }} + http: + paths: + - path: {{ default "/" .path }} + {{- if eq "true" (include "common.ingress.supportsPathType" $) }} + pathType: {{ default "ImplementationSpecific" .pathType }} + {{- end }} + backend: {{- include "common.ingress.backend" (dict "serviceName" (include "common.names.fullname" $) "servicePort" "minio-api" "context" $) | nindent 14 }} + {{- end }} + {{- if .Values.apiIngress.extraRules }} + {{- include "common.tplvalues.render" (dict "value" .Values.apiIngress.extraRules "context" $) | nindent 4 }} + {{- end }} + {{- if or (and .Values.apiIngress.tls (or (include "common.ingress.certManagerRequest" ( dict "annotations" .Values.apiIngress.annotations )) .Values.apiIngress.selfSigned)) .Values.apiIngress.extraTls }} + tls: + {{- if and .Values.apiIngress.tls (or (include "common.ingress.certManagerRequest" ( dict "annotations" .Values.apiIngress.annotations )) .Values.apiIngress.selfSigned) }} + - hosts: + - {{ .Values.apiIngress.hostname }} + secretName: {{ printf "%s-tls" .Values.apiIngress.hostname }} + {{- end }} + {{- if .Values.apiIngress.extraTls }} + {{- include "common.tplvalues.render" ( dict "value" .Values.apiIngress.extraTls "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} +{{- end }} diff --git a/charts/modules/minio/templates/distributed/headless-svc.yaml b/charts/modules/minio/templates/distributed/headless-svc.yaml new file mode 100644 index 0000000..7a2cf6d --- /dev/null +++ b/charts/modules/minio/templates/distributed/headless-svc.yaml @@ -0,0 +1,26 @@ +{{- if and (eq .Values.mode "distributed") (not .Values.gateway.enabled) }} +apiVersion: v1 +kind: Service +metadata: + name: {{ printf "%s-headless" (include "common.names.fullname" .) | trunc 63 }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + type: ClusterIP + clusterIP: None + ports: + - name: minio-api + port: {{ .Values.service.ports.api }} + targetPort: minio-api + - name: minio-console + port: {{ .Values.service.ports.console }} + targetPort: minio-console + publishNotReadyAddresses: true + selector: {{- include "common.labels.matchLabels" . | nindent 4 }} +{{- end }} diff --git a/charts/modules/minio/templates/distributed/pdb.yaml b/charts/modules/minio/templates/distributed/pdb.yaml new file mode 100644 index 0000000..fb5b94a --- /dev/null +++ b/charts/modules/minio/templates/distributed/pdb.yaml @@ -0,0 +1,23 @@ +{{- if and .Values.pdb.create (eq .Values.mode "distributed") (not .Values.gateway.enabled) }} +apiVersion: {{ include "common.capabilities.policy.apiVersion" . }} +kind: PodDisruptionBudget +metadata: + name: {{ include "common.names.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + {{- if .Values.pdb.minAvailable }} + minAvailable: {{ .Values.pdb.minAvailable }} + {{- end }} + {{- if .Values.pdb.maxUnavailable }} + maxUnavailable: {{ .Values.pdb.maxUnavailable }} + {{- end }} + selector: + matchLabels: {{ include "common.labels.matchLabels" . | nindent 6 }} +{{- end }} diff --git a/charts/modules/minio/templates/distributed/statefulset.yaml b/charts/modules/minio/templates/distributed/statefulset.yaml new file mode 100644 index 0000000..0286839 --- /dev/null +++ b/charts/modules/minio/templates/distributed/statefulset.yaml @@ -0,0 +1,341 @@ +{{- if and (eq .Values.mode "distributed") (not .Values.gateway.enabled) }} +{{- $fullname := include "common.names.fullname" . }} +{{- $headlessService := printf "%s-headless" (include "common.names.fullname" .) | trunc 63 }} +{{- $releaseNamespace := .Release.Namespace }} +{{- $clusterDomain := .Values.clusterDomain }} +{{- $apiPort := toString .Values.containerPorts.api }} +{{- $replicaCount := int .Values.statefulset.replicaCount }} +{{- $zoneCount := int .Values.statefulset.zones }} +{{- $drivesPerNode := int .Values.statefulset.drivesPerNode }} +{{- $mountPath := .Values.persistence.mountPath }} +apiVersion: {{ include "common.capabilities.statefulset.apiVersion" . }} +kind: StatefulSet +metadata: + name: {{ $fullname }} + namespace: {{ $releaseNamespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + selector: + matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }} + serviceName: {{ $headlessService }} + replicas: {{ mul $zoneCount $replicaCount }} + podManagementPolicy: {{ .Values.statefulset.podManagementPolicy }} + {{- if .Values.statefulset.updateStrategy }} + updateStrategy: {{- toYaml .Values.statefulset.updateStrategy | nindent 4 }} + {{- end }} + template: + metadata: + labels: {{- include "common.labels.standard" . | nindent 8 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 8 }} + {{- end }} + {{- if .Values.podLabels }} + {{- include "common.tplvalues.render" (dict "value" .Values.podLabels "context" $) | nindent 8 }} + {{- end }} + {{- if or .Values.podAnnotations (include "minio.createSecret" .) }} + annotations: + {{- if (include "minio.createSecret" .) }} + checksum/credentials-secret: {{ include (print $.Template.BasePath "/secrets.yaml") . | sha256sum }} + {{- end }} + {{- if .Values.podAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.podAnnotations "context" $) | nindent 8 }} + {{- end }} + {{- end }} + spec: + {{- include "minio.imagePullSecrets" . | nindent 6 }} + {{- if .Values.schedulerName }} + schedulerName: {{ .Values.schedulerName }} + {{- end }} + serviceAccountName: {{ template "minio.serviceAccountName" . 
}} + {{- if .Values.affinity }} + affinity: {{- include "common.tplvalues.render" (dict "value" .Values.affinity "context" $) | nindent 8 }} + {{- else }} + affinity: + podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.podAffinityPreset "context" $) | nindent 10 }} + podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.podAntiAffinityPreset "context" $) | nindent 10 }} + nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.nodeAffinityPreset.type "key" .Values.nodeAffinityPreset.key "values" .Values.nodeAffinityPreset.values) | nindent 10 }} + {{- end }} + {{- if .Values.nodeSelector }} + nodeSelector: {{- include "common.tplvalues.render" (dict "value" .Values.nodeSelector "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.hostAliases }} + hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.hostAliases "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.tolerations }} + tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.tolerations "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.topologySpreadConstraints }} + topologySpreadConstraints: {{- include "common.tplvalues.render" (dict "value" .Values.topologySpreadConstraints "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.priorityClassName }} + priorityClassName: {{ .Values.priorityClassName | quote }} + {{- end }} + {{- if .Values.podSecurityContext.enabled }} + securityContext: {{- omit .Values.podSecurityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + {{- if .Values.terminationGracePeriodSeconds }} + terminationGracePeriodSeconds: {{ .Values.terminationGracePeriodSeconds }} + {{- end }} + {{- if or .Values.initContainers (and .Values.volumePermissions.enabled .Values.persistence.enabled) }} + initContainers: + {{- if .Values.initContainers }} + {{- include "common.tplvalues.render" (dict "value" .Values.initContainers "context" $) | nindent 8 }} + {{- end }} + {{- if and .Values.volumePermissions.enabled .Values.persistence.enabled }} + - name: volume-permissions + image: {{ template "minio.volumePermissions.image" . }} + imagePullPolicy: {{ default "" .Values.volumePermissions.image.pullPolicy | quote }} + command: + - /bin/bash + - -ec + - | + {{- if and .Values.persistence.enabled (gt $drivesPerNode 1) }} + chown -R {{ .Values.containerSecurityContext.runAsUser }}:{{ .Values.podSecurityContext.fsGroup }} {{ range $diskId := until $drivesPerNode }}{{ $mountPath }}-{{ $diskId }} {{ end }} + {{- else }} + chown -R {{ .Values.containerSecurityContext.runAsUser }}:{{ .Values.podSecurityContext.fsGroup }} {{ $mountPath }} + {{- end }} + securityContext: {{- .Values.volumePermissions.containerSecurityContext | toYaml | nindent 12 }} + {{- if .Values.volumePermissions.resources }} + resources: {{- toYaml .Values.volumePermissions.resources | nindent 12 }} + {{- end }} + volumeMounts: + {{- if and .Values.persistence.enabled (gt $drivesPerNode 1) }} + {{- range $diskId := until $drivesPerNode }} + - name: data-{{ $diskId }} + mountPath: {{ $mountPath }}-{{ $diskId }} + {{- end }} + {{- else }} + - name: data + mountPath: {{ $mountPath }} + {{- end }} + {{- end }} + {{- end }} + containers: + - name: minio + image: {{ include "minio.image" . 
}} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if .Values.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.command }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.command "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.args }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.args "context" $) | nindent 12 }} + {{- end }} + env: + - name: BITNAMI_DEBUG + value: {{ ternary "true" "false" .Values.image.debug | quote }} + - name: MINIO_DISTRIBUTED_MODE_ENABLED + value: "yes" + - name: MINIO_DISTRIBUTED_NODES + {{- $clusters := list }} + {{- range $i := until $zoneCount }} + {{- $factor := mul $i $replicaCount }} + {{- $endIndex := sub (add $factor $replicaCount) 1 }} + {{- $beginIndex := mul $i $replicaCount }} + {{- $bucket := ternary (printf "%s-{0...%d}" $mountPath (sub $drivesPerNode 1)) $mountPath (gt $drivesPerNode 1) }} + {{- $clusters = append $clusters (printf "%s-{%d...%d}.%s.%s.svc.%s:%s%s" $fullname $beginIndex $endIndex $headlessService $releaseNamespace $clusterDomain $apiPort $bucket) }} + {{- end }} + value: {{ join "," $clusters | quote }} + - name: MINIO_SCHEME + value: {{ ternary "https" "http" .Values.tls.enabled | quote }} + - name: MINIO_FORCE_NEW_KEYS + value: {{ ternary "yes" "no" .Values.auth.forceNewKeys | quote }} + {{- if .Values.auth.useCredentialsFiles }} + - name: MINIO_ROOT_USER_FILE + value: "/opt/bitnami/minio/secrets/root-user" + {{- else }} + - name: MINIO_ROOT_USER + valueFrom: + secretKeyRef: + name: {{ include "minio.secretName" . }} + key: root-user + {{- end }} + {{- if .Values.auth.useCredentialsFiles }} + - name: MINIO_ROOT_PASSWORD_FILE + value: "/opt/bitnami/minio/secrets/root-password" + {{- else }} + - name: MINIO_ROOT_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "minio.secretName" . 
}} + key: root-password + {{- end }} + - name: MINIO_SKIP_CLIENT + value: {{ ternary "yes" "no" (empty .Values.defaultBuckets) | quote }} + {{- if .Values.defaultBuckets }} + - name: MINIO_DEFAULT_BUCKETS + value: {{ .Values.defaultBuckets }} + {{- end }} + - name: MINIO_BROWSER + value: {{ ternary "off" "on" .Values.disableWebUI | quote }} + - name: MINIO_PROMETHEUS_AUTH_TYPE + value: {{ .Values.metrics.prometheusAuthType | quote }} + {{- if .Values.tls.mountPath }} + - name: MINIO_CERTS_DIR + value: {{ .Values.tls.mountPath | quote }} + - name: MINIO_CONSOLE_PORT_NUMBER + value: {{ .Values.containerPorts.console | quote }} + {{- end }} + {{- if .Values.extraEnvVars }} + {{- include "common.tplvalues.render" (dict "value" .Values.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + envFrom: + {{- if .Values.extraEnvVarsCM }} + - configMapRef: + name: {{ include "common.tplvalues.render" (dict "value" .Values.extraEnvVarsCM "context" $) }} + {{- end }} + {{- if .Values.extraEnvVarsSecret }} + - secretRef: + name: {{ include "common.tplvalues.render" (dict "value" .Values.extraEnvVarsSecret "context" $) }} + {{- end }} + ports: + - name: minio-api + containerPort: {{ .Values.containerPorts.api }} + protocol: TCP + - name: minio-console + containerPort: {{ .Values.containerPorts.console }} + protocol: TCP + {{- if .Values.customLivenessProbe }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.customLivenessProbe "context" $) | nindent 12 }} + {{- else if .Values.livenessProbe.enabled }} + livenessProbe: + httpGet: + path: /minio/health/live + port: minio-api + scheme: {{ ternary "HTTPS" "HTTP" .Values.tls.enabled | quote }} + initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.livenessProbe.failureThreshold }} + {{- end }} + {{- if .Values.customReadinessProbe }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.customReadinessProbe "context" $) | nindent 12 }} + {{- else if .Values.readinessProbe.enabled }} + readinessProbe: + tcpSocket: + port: minio-api + initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.readinessProbe.failureThreshold }} + {{- end }} + {{- if .Values.customStartupProbe }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.customStartupProbe "context" $) | nindent 12 }} + {{- else if .Values.startupProbe.enabled }} + startupProbe: + tcpSocket: + port: minio-api + initialDelaySeconds: {{ .Values.startupProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.startupProbe.periodSeconds }} + timeoutSeconds: {{ .Values.startupProbe.timeoutSeconds }} + successThreshold: {{ .Values.startupProbe.successThreshold }} + failureThreshold: {{ .Values.startupProbe.failureThreshold }} + {{- end }} + {{- if .Values.resources }} + resources: {{- toYaml .Values.resources | nindent 12 }} + {{- end }} + {{- if .Values.lifecycleHooks }} + lifecycle: {{- include "common.tplvalues.render" (dict "value" .Values.lifecycleHooks "context" $) | nindent 12 }} + {{- end }} + volumeMounts: + {{- if .Values.auth.useCredentialsFiles }} + - name: 
minio-credentials + mountPath: /opt/bitnami/minio/secrets/ + {{- end }} + {{- if .Values.tls.enabled }} + - name: minio-certs + mountPath: {{ default "/certs" .Values.tls.mountPath }} + {{- end }} + {{- if gt $drivesPerNode 1 }} + {{- range $diskId := until $drivesPerNode }} + - name: data-{{ $diskId }} + mountPath: {{ $mountPath }}-{{ $diskId }} + {{- end }} + {{- else }} + - name: data + mountPath: {{ $mountPath }} + {{- end }} + {{- if .Values.extraVolumeMounts }} + {{- include "common.tplvalues.render" (dict "value" .Values.extraVolumeMounts "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.sidecars }} + {{- include "common.tplvalues.render" (dict "value" .Values.sidecars "context" $) | nindent 8 }} + {{- end }} + volumes: + {{- if .Values.auth.useCredentialsFiles }} + - name: minio-credentials + secret: + secretName: {{ include "minio.secretName" . }} + {{- end }} + {{- if .Values.tls.enabled }} + - name: minio-certs + secret: + secretName: {{ include "minio.tlsSecretName" . }} + items: + - key: tls.crt + path: public.crt + - key: tls.key + path: private.key + - key: ca.crt + path: CAs/public.crt + {{- end }} + {{- if .Values.extraVolumes }} + {{- include "common.tplvalues.render" (dict "value" .Values.extraVolumes "context" $) | nindent 8 }} + {{- end }} + {{- if and (not .Values.persistence.enabled) (gt $drivesPerNode 1) }} + {{- range $diskId := until $drivesPerNode }} + - name: data-{{ $diskId }} + emptyDir: {} + {{- end }} + {{- else if not .Values.persistence.enabled }} + - name: data + emptyDir: {} + {{- else }} + volumeClaimTemplates: + {{- if gt $drivesPerNode 1 }} + {{- range $diskId := until $drivesPerNode }} + - metadata: + name: data-{{ $diskId }} + labels: {{- include "common.labels.matchLabels" $ | nindent 10 }} + {{- if $.Values.persistence.annotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $.Values.persistence.annotations "context" $) | nindent 10 }} + {{- end }} + spec: + accessModes: + {{- range $.Values.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ $.Values.persistence.size | quote }} + {{- include "common.storage.class" (dict "persistence" $.Values.persistence "global" $.Values.global) | nindent 8 }} + {{- end }} + {{- else }} + - metadata: + name: data + labels: {{- include "common.labels.matchLabels" . | nindent 10 }} + {{- if .Values.persistence.annotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.persistence.annotations "context" $) | nindent 10 }} + {{- end }} + spec: + accessModes: + {{- range .Values.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.persistence.size | quote }} + {{- include "common.storage.class" (dict "persistence" .Values.persistence "global" .Values.global) | nindent 8 }} + {{- end }} + {{- end }} +{{- end }} diff --git a/charts/modules/minio/templates/extra-list.yaml b/charts/modules/minio/templates/extra-list.yaml new file mode 100644 index 0000000..9ac65f9 --- /dev/null +++ b/charts/modules/minio/templates/extra-list.yaml @@ -0,0 +1,4 @@ +{{- range .Values.extraDeploy }} +--- +{{ include "common.tplvalues.render" (dict "value" . 
"context" $) }} +{{- end }} diff --git a/charts/modules/minio/templates/gateway/deployment.yaml b/charts/modules/minio/templates/gateway/deployment.yaml new file mode 100644 index 0000000..7eb0bcc --- /dev/null +++ b/charts/modules/minio/templates/gateway/deployment.yaml @@ -0,0 +1,252 @@ +{{- if .Values.gateway.enabled }} +apiVersion: {{ include "common.capabilities.deployment.apiVersion" . }} +kind: Deployment +metadata: + name: {{ include "common.names.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + {{- if not .Values.gateway.autoscaling.enabled }} + replicas: {{ .Values.gateway.replicaCount }} + {{- end }} + {{- if .Values.gateway.updateStrategy }} + strategy: {{- toYaml .Values.gateway.updateStrategy | nindent 4 }} + {{- else }} + strategy: {{- toYaml .Values.deployment.updateStrategy | nindent 4 }} + {{- end }} + selector: + matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }} + template: + metadata: + labels: {{- include "common.labels.standard" . | nindent 8 }} + {{- if .Values.podLabels }} + {{- include "common.tplvalues.render" (dict "value" .Values.podLabels "context" $) | nindent 8 }} + {{- end }} + {{- if or .Values.podAnnotations (include "minio.createSecret" .) }} + annotations: + {{- if (include "minio.createSecret" .) }} + checksum/credentials-secret: {{ include (print $.Template.BasePath "/secrets.yaml") . | sha256sum }} + {{- end }} + {{- if .Values.podAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.podAnnotations "context" $) | nindent 8 }} + {{- end }} + {{- end }} + spec: + {{- include "minio.imagePullSecrets" . | nindent 6 }} + {{- if .Values.schedulerName }} + schedulerName: {{ .Values.schedulerName }} + {{- end }} + serviceAccountName: {{ template "minio.serviceAccountName" . 
}} + {{- if .Values.affinity }} + affinity: {{- include "common.tplvalues.render" (dict "value" .Values.affinity "context" $) | nindent 8 }} + {{- else }} + affinity: + podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.podAffinityPreset "context" $) | nindent 10 }} + podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.podAntiAffinityPreset "context" $) | nindent 10 }} + nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.nodeAffinityPreset.type "key" .Values.nodeAffinityPreset.key "values" .Values.nodeAffinityPreset.values) | nindent 10 }} + {{- end }} + {{- if .Values.nodeSelector }} + nodeSelector: {{- include "common.tplvalues.render" (dict "value" .Values.nodeSelector "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.tolerations }} + tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.tolerations "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.topologySpreadConstraints }} + topologySpreadConstraints: {{- include "common.tplvalues.render" (dict "value" .Values.topologySpreadConstraints "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.priorityClassName }} + priorityClassName: {{ .Values.priorityClassName | quote }} + {{- end }} + {{- if .Values.hostAliases }} + hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.hostAliases "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.terminationGracePeriodSeconds }} + terminationGracePeriodSeconds: {{ .Values.terminationGracePeriodSeconds }} + {{- end }} + {{- if .Values.podSecurityContext.enabled }} + securityContext: {{- omit .Values.podSecurityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + {{- if .Values.gateway.priorityClassName }} + priorityClassName: {{ .Values.gateway.priorityClassName | quote }} + {{- end }} + {{- if .Values.initContainers }} + initContainers: {{- include "common.tplvalues.render" (dict "value" .Values.initContainers "context" $) | nindent 8 }} + {{- end }} + containers: + - name: minio + image: {{ include "minio.image" . 
}} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if .Values.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + command: + {{- if .Values.command }} + {{- include "common.tplvalues.render" (dict "value" .Values.command "context" $) | nindent 12 }} + {{- else }} + - minio + {{- end }} + args: + {{- if .Values.args }} + {{- include "common.tplvalues.render" (dict "value" .Values.args "context" $) | nindent 12 }} + {{- else }} + - --certs-dir + - {{ default "/opt/bitnami/minio/certs" .Values.tls.mountPath }} + - gateway + - {{ .Values.gateway.type }} + - --console-address + - :{{ .Values.containerPorts.console }} + {{- if eq .Values.gateway.type "gcs" }} + - {{ .Values.gateway.auth.gcs.projectID }} + {{- else if eq .Values.gateway.type "nas" }} + - {{ .Values.persistence.mountPath }} + {{- else if eq .Values.gateway.type "s3" }} + - {{ .Values.gateway.auth.s3.serviceEndpoint }} + {{- else if and (eq .Values.gateway.type "azure") .Values.gateway.auth.azure.serviceEndpoint }} + - {{ .Values.gateway.auth.azure.serviceEndpoint }} + {{- end }} + {{- end }} + env: + {{- if eq .Values.gateway.type "azure" }} + - name: AZURE_STORAGE_ACCOUNT + valueFrom: + secretKeyRef: + {{- if not ( or (empty .Values.gateway.auth.azure.storageAccountNameExistingSecret) (empty .Values.gateway.auth.azure.storageAccountNameExistingSecretKey)) }} + name: {{ .Values.gateway.auth.azure.storageAccountNameExistingSecret }} + key: {{ .Values.gateway.auth.azure.storageAccountNameExistingSecretKey }} + {{- else }} + name: {{ include "minio.secretName" . }} + key: azure-storage-account-name + {{- end }} + - name: AZURE_STORAGE_KEY + valueFrom: + secretKeyRef: + {{- if not ( or (empty .Values.gateway.auth.azure.storageAccountNameExistingSecret) (empty .Values.gateway.auth.azure.storageAccountNameExistingSecretKey)) }} + name: {{ .Values.gateway.auth.azure.storageAccountKeyExistingSecret }} + key: {{ .Values.gateway.auth.azure.storageAccountKeyExistingSecretKey }} + {{- else }} + name: {{ include "minio.secretName" . }} + key: azure-storage-account-key + {{- end }} + {{- else if and (eq .Values.gateway.type "gcs") .Values.gateway.auth.gcs.keyJSON }} + - name: GOOGLE_APPLICATION_CREDENTIALS + value: "/opt/bitnami/minio/secrets/key.json" + {{- end }} + - name: MINIO_ROOT_USER + valueFrom: + secretKeyRef: + name: {{ include "minio.secretName" . }} + key: root-user + - name: MINIO_ROOT_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "minio.secretName" . 
}} + key: root-password + - name: MINIO_PROMETHEUS_AUTH_TYPE + value: {{ .Values.metrics.prometheusAuthType | quote }} + - name: MINIO_CONSOLE_PORT_NUMBER + value: {{ .Values.containerPorts.console | quote }} + {{- if .Values.extraEnvVars }} + {{- include "common.tplvalues.render" (dict "value" .Values.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + envFrom: + {{- if .Values.extraEnvVarsCM }} + - configMapRef: + name: {{ include "common.tplvalues.render" (dict "value" .Values.extraEnvVarsCM "context" $) }} + {{- end }} + {{- if .Values.extraEnvVarsSecret }} + - secretRef: + name: {{ include "common.tplvalues.render" (dict "value" .Values.extraEnvVarsSecret "context" $) }} + {{- end }} + ports: + - name: minio-api + containerPort: {{ .Values.containerPorts.api }} + protocol: TCP + - name: minio-console + containerPort: {{ .Values.containerPorts.console }} + protocol: TCP + {{- if .Values.customLivenessProbe }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.customLivenessProbe "context" $) | nindent 12 }} + {{- else if .Values.livenessProbe.enabled }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.livenessProbe "enabled") "context" $) | nindent 12 }} + httpGet: + path: /minio/health/live + port: minio-api + scheme: {{ ternary "HTTPS" "HTTP" .Values.tls.enabled | quote }} + {{- end }} + {{- if .Values.customReadinessProbe }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.customReadinessProbe "context" $) | nindent 12 }} + {{- else if .Values.readinessProbe.enabled }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.readinessProbe "enabled") "context" $) | nindent 12 }} + tcpSocket: + port: minio-api + {{- end }} + {{- if .Values.customStartupProbe }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.customStartupProbe "context" $) | nindent 12 }} + {{- else if .Values.startupProbe.enabled }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.startupProbe "enabled") "context" $) | nindent 12 }} + tcpSocket: + port: minio-console + {{- end }} + {{- if .Values.resources }} + resources: {{- toYaml .Values.resources | nindent 12 }} + {{- end }} + {{- if .Values.lifecycleHooks }} + lifecycle: {{- include "common.tplvalues.render" (dict "value" .Values.lifecycleHooks "context" $) | nindent 12 }} + {{- end }} + volumeMounts: + {{- if .Values.tls.enabled }} + - name: minio-certs + mountPath: {{ default "/opt/bitnami/minio/certs" .Values.tls.mountPath }} + {{- end }} + {{- if and (eq .Values.gateway.type "gcs") .Values.gateway.auth.gcs.keyJSON }} + - name: minio-credentials + mountPath: /opt/bitnami/minio/secrets/ + readOnly: true + {{- end }} + {{- if eq .Values.gateway.type "nas" }} + - name: data + mountPath: {{ .Values.persistence.mountPath }} + {{- end }} + {{- if .Values.extraVolumeMounts }} + {{- include "common.tplvalues.render" (dict "value" .Values.extraVolumeMounts "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.sidecars }} + {{- include "common.tplvalues.render" (dict "value" .Values.sidecars "context" $) | nindent 8 }} + {{- end }} + volumes: + {{- if and (eq .Values.gateway.type "gcs") .Values.gateway.auth.gcs.keyJSON }} + - name: minio-credentials + secret: + secretName: {{ include "minio.secretName" . }} + {{- end }} + {{- if eq .Values.gateway.type "nas" }} + - name: data + {{- if .Values.persistence.enabled }} + persistentVolumeClaim: + claimName: {{ include "minio.claimName" . 
}} + {{- else }} + emptyDir: {} + {{- end }} + {{- end }} + {{- if .Values.tls.enabled }} + - name: minio-certs + secret: + secretName: {{ include "minio.tlsSecretName" . }} + items: + - key: tls.crt + path: public.crt + - key: tls.key + path: private.key + - key: ca.crt + path: CAs/public.crt + {{- end }} + {{- if .Values.extraVolumes }} + {{- include "common.tplvalues.render" (dict "value" .Values.extraVolumes "context" $) | nindent 8 }} + {{- end }} +{{- end }} diff --git a/charts/modules/minio/templates/gateway/hpa.yaml b/charts/modules/minio/templates/gateway/hpa.yaml new file mode 100644 index 0000000..41a2b37 --- /dev/null +++ b/charts/modules/minio/templates/gateway/hpa.yaml @@ -0,0 +1,46 @@ +{{- if .Values.gateway.autoscaling.enabled }} +apiVersion: {{ include "common.capabilities.hpa.apiVersion" ( dict "context" $ ) }} +kind: HorizontalPodAutoscaler +metadata: + name: {{ template "common.names.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + scaleTargetRef: + apiVersion: {{ include "common.capabilities.deployment.apiVersion" . }} + kind: Deployment + name: {{ template "common.names.fullname" . }} + minReplicas: {{ .Values.gateway.autoscaling.minReplicas }} + maxReplicas: {{ .Values.gateway.autoscaling.maxReplicas }} + metrics: + {{- if .Values.gateway.autoscaling.targetMemory }} + - type: Resource + resource: + name: memory + {{- if semverCompare "<1.23-0" (include "common.capabilities.kubeVersion" .) }} + targetAverageUtilization: {{ .Values.gateway.autoscaling.targetMemory }} + {{- else }} + target: + type: Utilization + averageUtilization: {{ .Values.gateway.autoscaling.targetMemory }} + {{- end }} + {{- end }} + {{- if .Values.gateway.autoscaling.targetCPU }} + - type: Resource + resource: + name: cpu + {{- if semverCompare "<1.23-0" (include "common.capabilities.kubeVersion" .) }} + targetAverageUtilization: {{ .Values.gateway.autoscaling.targetCPU }} + {{- else }} + target: + type: Utilization + averageUtilization: {{ .Values.gateway.autoscaling.targetCPU }} + {{- end }} + {{- end }} +{{- end }} diff --git a/charts/modules/minio/templates/gateway/pdb.yaml b/charts/modules/minio/templates/gateway/pdb.yaml new file mode 100644 index 0000000..05e2b89 --- /dev/null +++ b/charts/modules/minio/templates/gateway/pdb.yaml @@ -0,0 +1,23 @@ +{{- if and .Values.pdb.create .Values.gateway.enabled }} +apiVersion: {{ include "common.capabilities.policy.apiVersion" . }} +kind: PodDisruptionBudget +metadata: + name: {{ include "common.names.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + {{- if .Values.pdb.minAvailable }} + minAvailable: {{ .Values.pdb.minAvailable }} + {{- end }} + {{- if .Values.pdb.maxUnavailable }} + maxUnavailable: {{ .Values.pdb.maxUnavailable }} + {{- end }} + selector: + matchLabels: {{ include "common.labels.matchLabels" . | nindent 6 }} +{{- end }} diff --git a/charts/modules/minio/templates/ingress.yaml b/charts/modules/minio/templates/ingress.yaml new file mode 100644 index 0000000..0bdddd1 --- /dev/null +++ b/charts/modules/minio/templates/ingress.yaml @@ -0,0 +1,60 @@ +{{- if and .Values.ingress.enabled (not .Values.disableWebUI ) -}} +apiVersion: {{ include "common.capabilities.ingress.apiVersion" . }} +kind: Ingress +metadata: + name: {{ include "common.names.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + annotations: + {{- if .Values.ingress.annotations }} + {{- include "common.tplvalues.render" (dict "value" .Values.ingress.annotations "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + {{- if and .Values.ingress.ingressClassName (include "common.ingress.supportsIngressClassname" .) }} + ingressClassName: {{ .Values.ingress.ingressClassName | quote }} + {{- end }} + rules: + {{- if .Values.ingress.hostname }} + - host: {{ .Values.ingress.hostname }} + http: + paths: + {{- if .Values.ingress.extraPaths }} + {{- toYaml .Values.ingress.extraPaths | nindent 10 }} + {{- end }} + - path: {{ .Values.ingress.path }} + {{- if eq "true" (include "common.ingress.supportsPathType" .) }} + pathType: {{ .Values.ingress.pathType }} + {{- end }} + backend: {{- include "common.ingress.backend" (dict "serviceName" (include "common.names.fullname" .) 
"servicePort" "minio-console" "context" $) | nindent 14 }} + {{- end }} + {{- range .Values.ingress.extraHosts }} + - host: {{ .name | quote }} + http: + paths: + - path: {{ default "/" .path }} + {{- if eq "true" (include "common.ingress.supportsPathType" $) }} + pathType: {{ default "ImplementationSpecific" .pathType }} + {{- end }} + backend: {{- include "common.ingress.backend" (dict "serviceName" (include "common.names.fullname" $) "servicePort" "minio-console" "context" $) | nindent 14 }} + {{- end }} + {{- if .Values.ingress.extraRules }} + {{- include "common.tplvalues.render" (dict "value" .Values.ingress.extraRules "context" $) | nindent 4 }} + {{- end }} + {{- if or (and .Values.ingress.tls (or (include "common.ingress.certManagerRequest" ( dict "annotations" .Values.ingress.annotations )) .Values.ingress.selfSigned)) .Values.ingress.extraTls }} + tls: + {{- if and .Values.ingress.tls (or (include "common.ingress.certManagerRequest" ( dict "annotations" .Values.ingress.annotations )) .Values.ingress.selfSigned) }} + - hosts: + - {{ .Values.ingress.hostname }} + secretName: {{ printf "%s-tls" .Values.ingress.hostname }} + {{- end }} + {{- if .Values.ingress.extraTls }} + {{- include "common.tplvalues.render" ( dict "value" .Values.ingress.extraTls "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} +{{- end }} diff --git a/charts/modules/minio/templates/networkpolicy.yaml b/charts/modules/minio/templates/networkpolicy.yaml new file mode 100644 index 0000000..7c1edc3 --- /dev/null +++ b/charts/modules/minio/templates/networkpolicy.yaml @@ -0,0 +1,34 @@ +{{- if .Values.networkPolicy.enabled }} +kind: NetworkPolicy +apiVersion: {{ include "common.capabilities.networkPolicy.apiVersion" . }} +metadata: + name: {{ include "common.names.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + podSelector: + matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }} + ingress: + # Allow inbound connections + - ports: + - port: {{ .Values.containerPorts.console }} + - port: {{ .Values.containerPorts.api }} + {{- if not .Values.networkPolicy.allowExternal }} + from: + - podSelector: + matchLabels: + {{ include "common.names.fullname" . }}-client: "true" + - podSelector: + matchLabels: {{- include "common.labels.matchLabels" . | nindent 14 }} + + {{- if .Values.networkPolicy.extraFromClauses }} + {{- toYaml .Values.networkPolicy.extraFromClauses | nindent 8 }} + {{- end }} + {{- end }} +{{- end }} diff --git a/charts/modules/minio/templates/prometheusrule.yaml b/charts/modules/minio/templates/prometheusrule.yaml new file mode 100644 index 0000000..c235dab --- /dev/null +++ b/charts/modules/minio/templates/prometheusrule.yaml @@ -0,0 +1,22 @@ +{{- if and .Values.metrics.enabled .Values.metrics.prometheusRule.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: {{ include "common.names.fullname" . }} + namespace: {{ default .Release.Namespace .Values.metrics.prometheusRule.namespace | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + app.kubernetes.io/component: metrics + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.metrics.prometheusRule.additionalLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.prometheusRule.additionalLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + groups: + - name: {{ include "common.names.fullname" . }} + rules: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.prometheusRule.rules "context" $ ) | nindent 6 }} +{{- end }} diff --git a/charts/modules/minio/templates/provisioning-configmap.yaml b/charts/modules/minio/templates/provisioning-configmap.yaml new file mode 100644 index 0000000..07112b0 --- /dev/null +++ b/charts/modules/minio/templates/provisioning-configmap.yaml @@ -0,0 +1,73 @@ +{{- if .Values.provisioning.enabled }} +{{- $fullname := printf "%s-provisioning" (include "common.names.fullname" .) }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ $fullname }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: minio-provisioning + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: + {{- range $bucket := .Values.provisioning.buckets }} + {{- if $bucket.lifecycle }} + bucket-{{ $bucket.name }}.json: | + { + "Rules": [ + {{- range $idx, $lifecycle := $bucket.lifecycle }} + {{- if not (eq $idx 0) }} + , + {{- end }} + { + "ID": "{{ $lifecycle.id }}", + "Status": "{{ ternary "Disabled" "Enabled" (and (not (empty $lifecycle.disabled)) $lifecycle.disabled) }}", + {{- if $lifecycle.expiry }} + "Expiration": { + {{- with $lifecycle.expiry.date }} + "Date": "{{ . }}" + {{- end }} + {{- with $lifecycle.expiry.days }} + "Days": {{ . }} + {{- end }} + } + {{- with $lifecycle.expiry.nonconcurrentDays }} + , + "NoncurrentVersionExpiration": { + "NoncurrentDays": {{ . }} + } + {{- end }} + {{- with $lifecycle.prefix }} + , + "Filter": { + "Prefix": "{{ . }}" + } + {{- end }} + } + {{- end }} + {{- end }} + ] + } + {{- end }} + {{- end }} + {{- range $policy := .Values.provisioning.policies }} + policy-{{ $policy.name }}.json: | + {{- $statementsLength := sub (len $policy.statements) 1 }} + { + "Version": "2012-10-17", + "Statement": [ + {{- range $i, $statement := $policy.statements }} + { + "Effect": "{{ default "Deny" $statement.effect }}"{{ if $statement.actions }}, + "Action": {{ toJson $statement.actions }}{{end}}{{ if $statement.resources }}, + "Resource": {{ toJson $statement.resources }}{{end}} + }{{ if lt $i $statementsLength }},{{end }} + {{- end }} + ] + } + {{- end }} +{{- end }} diff --git a/charts/modules/minio/templates/provisioning-job.yaml b/charts/modules/minio/templates/provisioning-job.yaml new file mode 100644 index 0000000..9c3dbc1 --- /dev/null +++ b/charts/modules/minio/templates/provisioning-job.yaml @@ -0,0 +1,311 @@ +{{- if .Values.provisioning.enabled }} +{{- $fullname := printf "%s-provisioning" (include "common.names.fullname" .) 
}} +{{- $minioAlias := "provisioning" }} +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ $fullname }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: minio-provisioning + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + annotations: + helm.sh/hook: post-install,post-upgrade + helm.sh/hook-delete-policy: before-hook-creation + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + parallelism: 1 + template: + metadata: + labels: {{- include "common.labels.standard" . | nindent 8 }} + app.kubernetes.io/component: minio-provisioning + {{- if .Values.podLabels }} + {{- include "common.tplvalues.render" (dict "value" .Values.podLabels "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.provisioning.podAnnotations }} + annotations: + {{- include "common.tplvalues.render" ( dict "value" .Values.provisioning.podAnnotations "context" $) | nindent 8 }} + {{- end }} + spec: + {{- include "minio.imagePullSecrets" . | nindent 6 }} + {{- if .Values.provisioning.schedulerName }} + schedulerName: {{ .Values.provisioning.schedulerName }} + {{- end }} + restartPolicy: OnFailure + terminationGracePeriodSeconds: 0 + {{- if .Values.provisioning.podSecurityContext.enabled }} + securityContext: {{- omit .Values.provisioning.podSecurityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + serviceAccountName: {{ template "minio.serviceAccountName" . }} + initContainers: + - name: wait-for-available-minio + image: {{ include "minio.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if .Values.provisioning.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.provisioning.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + command: + - /bin/bash + - -c + - >- + set -e; + echo "Waiting for Minio"; + wait-for-port \ + --host={{ include "common.names.fullname" . }} \ + --state=inuse \ + --timeout=120 \ + {{ .Values.service.ports.api | int64 }}; + echo "Minio is available"; + {{- if .Values.provisioning.resources }} + resources: {{- toYaml .Values.provisioning.resources | nindent 12 }} + {{- end }} + containers: + - name: minio + image: {{ include "minio.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if .Values.provisioning.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.provisioning.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.provisioning.command }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.provisioning.command "context" $) | nindent 12 }} + {{- else }} + command: + - /bin/bash + - -c + - >- + set -e; + echo "Start Minio provisioning"; + + function addPolicy() { + local tmp=$(mc admin $1 info {{ $minioAlias }} $2 | sed -n -e 's/^Policy.*: \(.*\)$/\1/p'); + IFS=',' read -r -a CURRENT_POLICIES <<< "$tmp"; + if [[ ! 
"${CURRENT_POLICIES[*]}" =~ "$3" ]]; then + mc admin policy update {{ $minioAlias }} $3 $1=$2; + fi; + }; + + function addUsersFromFile() { + local username=$(grep -oP '^username=\K.+' $1); + local password=$(grep -oP '^password=\K.+' $1); + local disabled=$(grep -oP '^disabled=\K.+' $1); + local policies_list=$(grep -oP '^policies=\K.+' $1); + local set_policies=$(grep -oP '^setPolicies=\K.+' $1); + + mc admin user add {{ $minioAlias }} "${username}" "${password}"; + + if [ "${set_policies}" == "true" ]; then + mc admin policy set {{ $minioAlias }} "${policies_list}" user="${username}"; + else + IFS=',' read -r -a POLICIES <<< "${policies_list}"; + for policy in "${POLICIES[@]}"; do + addPolicy user "${username}" "${policy}"; + done + fi; + + local user_status="enable"; + if [[ "${disabled}" != "" && "${disabled,,}" == "true" ]]; then + user_status="disable"; + fi; + + mc admin user "${user_status}" {{ $minioAlias }} "${username}"; + }; + + {{- $minioUrl := printf "$MINIO_SCHEME://%s:%d" (include "common.names.fullname" .) (.Values.service.ports.api | int) }} + {{- $minioRootUser := ternary ("$(<$MINIO_ROOT_USER_FILE)") ("$MINIO_ROOT_USER") (.Values.auth.useCredentialsFiles) }} + {{- $minioRootPassword := ternary ("$(<$MINIO_ROOT_PASSWORD_FILE)") ("$MINIO_ROOT_PASSWORD") (.Values.auth.useCredentialsFiles) }} + mc alias set {{ $minioAlias }} {{ $minioUrl }} {{ $minioRootUser }} {{ $minioRootPassword }}; + + {{- range $config := .Values.provisioning.config }} + {{- $options := list }} + {{- range $name, $value := $config.options }} + {{- $options = (printf "%s=%s" $name $value) | append $options }} + {{- end }} + {{- $options := join " " $options }} + mc admin config set {{ $minioAlias }} {{ $config.name }} {{ $options }}; + {{- end }} + + mc admin service restart {{ $minioAlias }}; + + {{- range $policy := .Values.provisioning.policies }} + mc admin policy add {{ $minioAlias }} {{ $policy.name }} /etc/ilm/policy-{{ $policy.name }}.json; + {{- end }} + + {{- range $user := .Values.provisioning.users }} + mc admin user add {{ $minioAlias }} {{ $user.username }} {{ $user.password }}; + {{- if $user.setPolicies }} + mc admin policy set {{ $minioAlias }} "{{ join "," $user.policies }}" user={{ $user.username }}; + {{- else }} + {{- range $policy := $user.policies }} + addPolicy user {{ $user.username }} {{ $policy }}; + {{- end }} + {{- end }} + {{- $userStatus := ternary ("disable") ("enable") (and (not (empty $user.disabled)) $user.disabled) }} + mc admin user {{ $userStatus }} {{ $minioAlias }} {{ $user.username }}; + {{- end }} + {{- if gt (len .Values.provisioning.usersExistingSecrets) 0 }} + while read -d '' configFile; do + addUsersFromFile "${configFile}"; + done < <(find "/opt/bitnami/minio/users/" -type l -not -name '..data' -print0); + {{- end }} + + {{- range $group := .Values.provisioning.groups }} + mc admin group add {{ $minioAlias }} {{ $group.name }} {{ join " " $group.members }}; + {{- if $group.setPolicies }} + mc admin policy set {{ $minioAlias }} "{{ join "," $group.policies }}" group={{ $group.name }}; + {{- else }} + {{- range $policy := $group.policies }} + addPolicy group {{ $group.name }} {{ $policy }}; + {{- end }} + {{- end }} + {{- $groupStatus := ternary ("disable") ("enable") (and (not (empty $group.disabled)) $group.disabled) }} + mc admin group {{ $groupStatus }} {{ $minioAlias }} {{ $group.name }}; + {{- end }} + + {{- $isDistributedMode := and (eq .Values.mode "distributed") (not .Values.gateway.enabled) }} + {{- range $bucket := .Values.provisioning.buckets 
}} + {{- $target := printf "%s/%s" $minioAlias $bucket.name }} + {{- $region := ternary (printf "--region=%s" $bucket.region) ("") (not (empty $bucket.region)) }} + {{- $withLock := ternary ("--with-lock") ("") (and (not (empty $bucket.withLock)) $bucket.withLock) }} + mc mb {{ $target }} --ignore-existing {{ $region }} {{ $withLock }}; + + {{- if $bucket.lifecycle }} + mc ilm import {{ $minioAlias }}/{{ $bucket.name }} < /etc/ilm/bucket-{{ $bucket.name }}.json; + {{- end }} + + {{- with $bucket.quota }} + mc admin bucket quota {{ $minioAlias }}/{{ $bucket.name }} --{{ .type }} {{ .size }}; + {{- end }} + + {{- if $isDistributedMode }} + {{- if (or ((empty $bucket.withLock)) (not $bucket.withLock)) }} + {{- $versioning := ternary ("enable") ("suspend") (and (not (empty $bucket.versioning)) $bucket.versioning) }} + mc version {{ $versioning }} {{ $minioAlias }}/{{ $bucket.name }}; + {{- end }} + {{- end }} + + {{- if $bucket.tags }} + {{- $target := printf "%s/%s" $minioAlias $bucket.name }} + {{- $tags := list }} + {{- range $name, $value := $bucket.tags }} + {{- $tags = (printf "%s=%s" $name $value) | append $tags }} + {{- end }} + {{- $tags := join "&" $tags | quote }} + mc tag set {{ $target }} {{ $tags }}; + {{- end }} + {{- end }} + + {{- if .Values.provisioning.extraCommands }} + {{ join ";" .Values.provisioning.extraCommands | nindent 14 }}; + {{- end }} + + echo "End Minio provisioning"; + {{- end }} + {{- if .Values.provisioning.args }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.provisioning.args "context" $) | nindent 12 }} + {{- end }} + env: + - name: MINIO_SCHEME + value: {{ ternary "https" "http" .Values.tls.enabled | quote }} + {{- if .Values.auth.useCredentialsFiles }} + - name: MINIO_ROOT_USER_FILE + value: "/opt/bitnami/minio/secrets/root-user" + {{- else }} + - name: MINIO_ROOT_USER + valueFrom: + secretKeyRef: + name: {{ include "minio.secretName" . }} + key: root-user + {{- end }} + {{- if .Values.auth.useCredentialsFiles }} + - name: MINIO_ROOT_PASSWORD_FILE + value: "/opt/bitnami/minio/secrets/root-password" + {{- else }} + - name: MINIO_ROOT_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "minio.secretName" . 
}} + key: root-password + {{- end }} + {{- if .Values.tls.mountPath }} + - name: MINIO_CERTS_DIR + value: {{ .Values.tls.mountPath | quote }} + {{- end }} + envFrom: + {{- if .Values.extraEnvVarsCM }} + - configMapRef: + name: {{ include "common.tplvalues.render" (dict "value" .Values.extraEnvVarsCM "context" $) }} + {{- end }} + {{- if .Values.extraEnvVarsSecret }} + - secretRef: + name: {{ include "common.tplvalues.render" (dict "value" .Values.extraEnvVarsSecret "context" $) }} + {{- end }} + {{- if .Values.provisioning.resources }} + resources: {{- toYaml .Values.provisioning.resources | nindent 12 }} + {{- end }} + volumeMounts: + {{- if .Values.provisioning.enabled }} + - name: minio-provisioning + mountPath: /etc/ilm + {{- end }} + {{- if .Values.auth.useCredentialsFiles }} + - name: minio-credentials + mountPath: /opt/bitnami/minio/secrets/ + {{- end }} + {{- if .Values.tls.enabled }} + - name: minio-certs + mountPath: {{ default "/certs" .Values.tls.mountPath }} + - name: minio-client-certs + mountPath: /.mc/certs + {{- end }} + {{- range $idx, $_ := .Values.provisioning.usersExistingSecrets }} + - name: {{ printf "users-secret-%d" $idx }} + mountPath: /opt/bitnami/minio/users/{{ $idx }}/ + {{- end }} + {{- if .Values.provisioning.extraVolumeMounts }} + {{- include "common.tplvalues.render" (dict "value" .Values.provisioning.extraVolumeMounts "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.tolerations }} + tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.tolerations "context" $) | nindent 8 }} + {{- end }} + volumes: + {{- if .Values.provisioning.enabled }} + - name: minio-provisioning + configMap: + name: {{ $fullname }} + {{- end }} + {{- if .Values.auth.useCredentialsFiles }} + - name: minio-credentials + secret: + secretName: {{ include "minio.secretName" . }} + {{- end }} + {{- range $idx, $userSecret := .Values.provisioning.usersExistingSecrets }} + - name: {{ printf "users-secret-%d" $idx }} + secret: + secretName: {{ $userSecret }} + {{- end }} + {{- if .Values.tls.enabled }} + - name: minio-certs + secret: + secretName: {{ include "minio.tlsSecretName" . }} + items: + - key: tls.crt + path: public.crt + - key: tls.key + path: private.key + - key: ca.crt + path: CAs/public.crt + - name: minio-client-certs + secret: + secretName: {{ include "minio.tlsSecretName" . }} + items: + - key: ca.crt + path: CAs/public.crt + {{- end }} + {{- if .Values.provisioning.extraVolumes }} + {{- include "common.tplvalues.render" (dict "value" .Values.provisioning.extraVolumes "context" $) | nindent 8 }} + {{- end }} +{{- end }} diff --git a/charts/modules/minio/templates/pvc.yaml b/charts/modules/minio/templates/pvc.yaml new file mode 100644 index 0000000..d17fd08 --- /dev/null +++ b/charts/modules/minio/templates/pvc.yaml @@ -0,0 +1,29 @@ +{{- if (include "minio.createPVC" .) }} +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: {{ include "common.names.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if or .Values.persistence.annotations .Values.commonAnnotations }} + annotations: + {{- if .Values.persistence.annotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.persistence.annotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} +spec: + accessModes: + {{- range .Values.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.persistence.size | quote }} + {{- include "common.storage.class" (dict "persistence" .Values.persistence "global" .Values.global) | nindent 2 }} +{{- end }} diff --git a/charts/modules/minio/templates/secrets.yaml b/charts/modules/minio/templates/secrets.yaml new file mode 100644 index 0000000..82dba8b --- /dev/null +++ b/charts/modules/minio/templates/secrets.yaml @@ -0,0 +1,24 @@ +{{- if (include "minio.createSecret" .) }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "common.names.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: Opaque +data: + root-user: {{ include "minio.secret.userValue" . | b64enc | quote }} + root-password: {{ include "minio.secret.passwordValue" . | b64enc | quote }} + {{- if eq .Values.gateway.type "azure" }} + azure-storage-account-name: {{ .Values.gateway.auth.azure.storageAccountName | toString | b64enc | quote }} + azure-storage-account-key: {{ .Values.gateway.auth.azure.storageAccountKey | toString | b64enc | quote }} + {{- else if and (eq .Values.gateway.type "gcs") .Values.gateway.auth.gcs.keyJSON }} + key.json: {{ .Values.gateway.auth.gcs.keyJSON | toString | b64enc | quote }} + {{- end }} +{{- end }} diff --git a/charts/modules/minio/templates/service.yaml b/charts/modules/minio/templates/service.yaml new file mode 100644 index 0000000..9c65830 --- /dev/null +++ b/charts/modules/minio/templates/service.yaml @@ -0,0 +1,53 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "common.names.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" .
| nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if or .Values.service.annotations .Values.commonAnnotations }} + annotations: + {{- if .Values.service.annotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.service.annotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} +spec: + type: {{ .Values.service.type }} + {{- if and (eq .Values.service.type "ClusterIP") .Values.service.clusterIP }} + clusterIP: {{ .Values.service.clusterIP }} + {{- end }} + {{- if or (eq .Values.service.type "LoadBalancer") (eq .Values.service.type "NodePort") }} + externalTrafficPolicy: {{ .Values.service.externalTrafficPolicy | quote }} + {{- end }} + {{- if and (eq .Values.service.type "LoadBalancer") .Values.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: {{ .Values.service.loadBalancerSourceRanges }} + {{ end }} + {{- if and (eq .Values.service.type "LoadBalancer") (not (empty .Values.service.loadBalancerIP)) }} + loadBalancerIP: {{ .Values.service.loadBalancerIP }} + {{- end }} + ports: + - name: minio-api + port: {{ .Values.service.ports.api }} + targetPort: minio-api + {{- if and (or (eq .Values.service.type "NodePort") (eq .Values.service.type "LoadBalancer")) (not (empty .Values.service.nodePorts.api)) }} + nodePort: {{ .Values.service.nodePorts.api }} + {{- else if eq .Values.service.type "ClusterIP" }} + nodePort: null + {{- end }} + - name: minio-console + port: {{ .Values.service.ports.console }} + targetPort: minio-console + {{- if and (or (eq .Values.service.type "NodePort") (eq .Values.service.type "LoadBalancer")) (not (empty .Values.service.nodePorts.console)) }} + nodePort: {{ .Values.service.nodePorts.console }} + {{- else if eq .Values.service.type "ClusterIP" }} + nodePort: null + {{- end }} + {{- if .Values.service.extraPorts }} + {{- include "common.tplvalues.render" (dict "value" .Values.service.extraPorts "context" $) | nindent 4 }} + {{- end }} + selector: {{- include "common.labels.matchLabels" . | nindent 4 }} diff --git a/charts/modules/minio/templates/serviceaccount.yaml b/charts/modules/minio/templates/serviceaccount.yaml new file mode 100644 index 0000000..f0c6566 --- /dev/null +++ b/charts/modules/minio/templates/serviceaccount.yaml @@ -0,0 +1,23 @@ +{{- if .Values.serviceAccount.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "minio.serviceAccountName" . }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if or .Values.serviceAccount.annotations .Values.commonAnnotations }} + annotations: + {{- if .Values.serviceAccount.annotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.serviceAccount.annotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} +automountServiceAccountToken: {{ .Values.serviceAccount.automountServiceAccountToken }} +secrets: + - name: {{ include "common.names.fullname" . 
}} +{{- end }} diff --git a/charts/modules/minio/templates/servicemonitor.yaml b/charts/modules/minio/templates/servicemonitor.yaml new file mode 100644 index 0000000..fec24be --- /dev/null +++ b/charts/modules/minio/templates/servicemonitor.yaml @@ -0,0 +1,51 @@ +{{- if .Values.metrics.serviceMonitor.enabled }} +apiVersion: {{ default "monitoring.coreos.com/v1" .Values.metrics.serviceMonitor.apiVersion }} +kind: ServiceMonitor +metadata: + name: {{ include "common.names.fullname" . }} + {{- if .Values.metrics.serviceMonitor.namespace }} + namespace: {{ .Values.metrics.serviceMonitor.namespace | quote }} + {{- else }} + namespace: {{ .Release.Namespace | quote }} + {{- end }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.metrics.serviceMonitor.labels }} + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.serviceMonitor.labels "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + endpoints: + - port: minio-api + path: {{ .Values.metrics.serviceMonitor.path }} + {{- if .Values.metrics.serviceMonitor.interval }} + interval: {{ .Values.metrics.serviceMonitor.interval }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.metrics.serviceMonitor.scrapeTimeout }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.honorLabels }} + honorLabels: {{ .Values.metrics.serviceMonitor.honorLabels }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.metricRelabelings }} + metricRelabelings: {{- toYaml .Values.metrics.serviceMonitor.metricRelabelings | nindent 6 }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.relabelings }} + relabelings: {{- toYaml .Values.metrics.serviceMonitor.relabelings | nindent 6 }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.jobLabel }} + jobLabel: {{ .Values.metrics.serviceMonitor.jobLabel }} + {{- end }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace | quote }} + selector: + matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }} + {{- if .Values.metrics.serviceMonitor.selector }} + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.serviceMonitor.selector "context" $) | nindent 6 }} + {{- end }} +{{- end }} diff --git a/charts/modules/minio/templates/standalone/deployment.yaml b/charts/modules/minio/templates/standalone/deployment.yaml new file mode 100644 index 0000000..a90d80c --- /dev/null +++ b/charts/modules/minio/templates/standalone/deployment.yaml @@ -0,0 +1,258 @@ +{{- if and (eq .Values.mode "standalone") (not .Values.gateway.enabled) }} +apiVersion: {{ include "common.capabilities.deployment.apiVersion" . }} +kind: Deployment +metadata: + name: {{ include "common.names.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + selector: + matchLabels: {{- include "common.labels.matchLabels" . 
| nindent 6 }} + {{- if .Values.deployment.updateStrategy }} + strategy: {{- toYaml .Values.deployment.updateStrategy | nindent 4 }} + {{- end }} + template: + metadata: + labels: {{- include "common.labels.standard" . | nindent 8 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 8 }} + {{- end }} + {{- if .Values.podLabels }} + {{- include "common.tplvalues.render" (dict "value" .Values.podLabels "context" $) | nindent 8 }} + {{- end }} + {{- if or .Values.podAnnotations (include "minio.createSecret" .) }} + annotations: + {{- if (include "minio.createSecret" .) }} + checksum/credentials-secret: {{ include (print $.Template.BasePath "/secrets.yaml") . | sha256sum }} + {{- end }} + {{- if .Values.podAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.podAnnotations "context" $) | nindent 8 }} + {{- end }} + {{- end }} + spec: + {{- include "minio.imagePullSecrets" . | nindent 6 }} + {{- if .Values.schedulerName }} + schedulerName: {{ .Values.schedulerName }} + {{- end }} + serviceAccountName: {{ template "minio.serviceAccountName" . }} + {{- if .Values.affinity }} + affinity: {{- include "common.tplvalues.render" (dict "value" .Values.affinity "context" $) | nindent 8 }} + {{- else }} + affinity: + podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.podAffinityPreset "context" $) | nindent 10 }} + podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.podAntiAffinityPreset "context" $) | nindent 10 }} + nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.nodeAffinityPreset.type "key" .Values.nodeAffinityPreset.key "values" .Values.nodeAffinityPreset.values) | nindent 10 }} + {{- end }} + {{- if .Values.nodeSelector }} + nodeSelector: {{- include "common.tplvalues.render" (dict "value" .Values.nodeSelector "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.tolerations }} + tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.tolerations "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.topologySpreadConstraints }} + topologySpreadConstraints: {{- include "common.tplvalues.render" (dict "value" .Values.topologySpreadConstraints "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.priorityClassName }} + priorityClassName: {{ .Values.priorityClassName | quote }} + {{- end }} + {{- if .Values.hostAliases }} + hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.hostAliases "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.podSecurityContext.enabled }} + securityContext: {{- omit .Values.podSecurityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + {{- if .Values.terminationGracePeriodSeconds }} + terminationGracePeriodSeconds: {{ .Values.terminationGracePeriodSeconds }} + {{- end }} + {{- if or .Values.initContainers (and .Values.volumePermissions.enabled .Values.persistence.enabled) }} + initContainers: + {{- if .Values.initContainers }} + {{- include "common.tplvalues.render" (dict "value" .Values.initContainers "context" $) | nindent 8 }} + {{- end }} + {{- if and .Values.volumePermissions.enabled .Values.persistence.enabled }} + - name: volume-permissions + image: {{ template "minio.volumePermissions.image" . 
}} + imagePullPolicy: {{ default "" .Values.volumePermissions.image.pullPolicy | quote }} + command: + - /bin/bash + - -ec + - | + chown -R {{ .Values.containerSecurityContext.runAsUser }}:{{ .Values.podSecurityContext.fsGroup }} {{ .Values.persistence.mountPath }} + securityContext: {{- .Values.volumePermissions.containerSecurityContext | toYaml | nindent 12 }} + {{- if .Values.volumePermissions.resources }} + resources: {{- toYaml .Values.volumePermissions.resources | nindent 12 }} + {{- end }} + volumeMounts: + - name: data + mountPath: {{ .Values.persistence.mountPath }} + {{- end }} + {{- end }} + containers: + - name: minio + image: {{ include "minio.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if .Values.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.command }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.command "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.args }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.args "context" $) | nindent 12 }} + {{- end }} + env: + - name: BITNAMI_DEBUG + value: {{ ternary "true" "false" .Values.image.debug | quote }} + - name: MINIO_SCHEME + value: {{ ternary "https" "http" .Values.tls.enabled | quote }} + - name: MINIO_FORCE_NEW_KEYS + value: {{ ternary "yes" "no" .Values.auth.forceNewKeys | quote }} + {{- if .Values.auth.useCredentialsFiles }} + - name: MINIO_ROOT_USER_FILE + value: "/opt/bitnami/minio/secrets/root-user" + {{- else }} + - name: MINIO_ROOT_USER + valueFrom: + secretKeyRef: + name: {{ include "minio.secretName" . }} + key: root-user + {{- end }} + {{- if .Values.auth.useCredentialsFiles }} + - name: MINIO_ROOT_PASSWORD_FILE + value: "/opt/bitnami/minio/secrets/root-password" + {{- else }} + - name: MINIO_ROOT_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "minio.secretName" . 
}} + key: root-password + {{- end }} + {{- if .Values.defaultBuckets }} + - name: MINIO_DEFAULT_BUCKETS + value: {{ .Values.defaultBuckets }} + {{- end }} + - name: MINIO_BROWSER + value: {{ ternary "off" "on" .Values.disableWebUI | quote }} + - name: MINIO_PROMETHEUS_AUTH_TYPE + value: {{ .Values.metrics.prometheusAuthType | quote }} + - name: MINIO_CONSOLE_PORT_NUMBER + value: {{ .Values.containerPorts.console | quote }} + {{- if .Values.tls.mountPath }} + - name: MINIO_CERTS_DIR + value: {{ .Values.tls.mountPath | quote }} + {{- end }} + {{- if .Values.extraEnvVars }} + {{- include "common.tplvalues.render" (dict "value" .Values.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + envFrom: + {{- if .Values.extraEnvVarsCM }} + - configMapRef: + name: {{ include "common.tplvalues.render" (dict "value" .Values.extraEnvVarsCM "context" $) }} + {{- end }} + {{- if .Values.extraEnvVarsSecret }} + - secretRef: + name: {{ include "common.tplvalues.render" (dict "value" .Values.extraEnvVarsSecret "context" $) }} + {{- end }} + ports: + - name: minio-api + containerPort: {{ .Values.containerPorts.api }} + protocol: TCP + - name: minio-console + containerPort: {{ .Values.containerPorts.console }} + protocol: TCP + {{- if .Values.customLivenessProbe }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.customLivenessProbe "context" $) | nindent 12 }} + {{- else if .Values.livenessProbe.enabled }} + livenessProbe: + httpGet: + path: /minio/health/live + port: minio-api + scheme: {{ ternary "HTTPS" "HTTP" .Values.tls.enabled | quote }} + initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.livenessProbe.failureThreshold }} + {{- end }} + {{- if .Values.customReadinessProbe }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.customReadinessProbe "context" $) | nindent 12 }} + {{- else if .Values.readinessProbe.enabled }} + readinessProbe: + tcpSocket: + port: minio-api + initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.readinessProbe.failureThreshold }} + {{- end }} + {{- if .Values.customStartupProbe }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.customStartupProbe "context" $) | nindent 12 }} + {{- else if .Values.startupProbe.enabled }} + startupProbe: + tcpSocket: + port: minio-console + initialDelaySeconds: {{ .Values.startupProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.startupProbe.periodSeconds }} + timeoutSeconds: {{ .Values.startupProbe.timeoutSeconds }} + successThreshold: {{ .Values.startupProbe.successThreshold }} + failureThreshold: {{ .Values.startupProbe.failureThreshold }} + {{- end }} + {{- if .Values.resources }} + resources: {{- toYaml .Values.resources | nindent 12 }} + {{- end }} + {{- if .Values.lifecycleHooks }} + lifecycle: {{- include "common.tplvalues.render" (dict "value" .Values.lifecycleHooks "context" $) | nindent 12 }} + {{- end }} + volumeMounts: + {{- if .Values.auth.useCredentialsFiles }} + - name: minio-credentials + mountPath: /opt/bitnami/minio/secrets/ + {{- end }} + - name: data + mountPath: {{ 
.Values.persistence.mountPath }} + {{- if .Values.tls.enabled }} + - name: minio-certs + mountPath: {{ default "/certs" .Values.tls.mountPath }} + {{- end }} + {{- if .Values.extraVolumeMounts }} + {{- include "common.tplvalues.render" (dict "value" .Values.extraVolumeMounts "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.sidecars }} + {{- include "common.tplvalues.render" (dict "value" .Values.sidecars "context" $) | nindent 8 }} + {{- end }} + volumes: + {{- if .Values.auth.useCredentialsFiles }} + - name: minio-credentials + secret: + secretName: {{ include "minio.secretName" . }} + {{- end }} + - name: data + {{- if .Values.persistence.enabled }} + persistentVolumeClaim: + claimName: {{ include "minio.claimName" . }} + {{- else }} + emptyDir: {} + {{- end }} + {{- if .Values.tls.enabled }} + - name: minio-certs + secret: + secretName: {{ include "minio.tlsSecretName" . }} + items: + - key: tls.crt + path: public.crt + - key: tls.key + path: private.key + - key: ca.crt + path: CAs/public.crt + {{- end }} + {{- if .Values.extraVolumes }} + {{- include "common.tplvalues.render" (dict "value" .Values.extraVolumes "context" $) | nindent 8 }} + {{- end }} +{{- end }} diff --git a/charts/modules/minio/templates/tls-secrets.yaml b/charts/modules/minio/templates/tls-secrets.yaml new file mode 100644 index 0000000..e3c1836 --- /dev/null +++ b/charts/modules/minio/templates/tls-secrets.yaml @@ -0,0 +1,71 @@ +{{- if .Values.ingress.enabled }} +{{- if .Values.ingress.secrets }} +{{- range .Values.ingress.secrets }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ .name }} + namespace: {{ $.Release.Namespace | quote }} + labels: {{- include "common.labels.standard" $ | nindent 4 }} + {{- if $.Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" $.Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if $.Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $.Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: kubernetes.io/tls +data: + tls.crt: {{ .certificate | b64enc }} + tls.key: {{ .key | b64enc }} +--- +{{- end }} +{{- end }} +{{- if and .Values.ingress.tls .Values.ingress.selfSigned }} +{{- $ca := genCA "minio-ca" 365 }} +{{- $cert := genSignedCert .Values.ingress.hostname nil (list .Values.ingress.hostname) 365 $ca }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ printf "%s-tls" .Values.ingress.hostname }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: kubernetes.io/tls +data: + tls.crt: {{ $cert.Cert | b64enc | quote }} + tls.key: {{ $cert.Key | b64enc | quote }} + ca.crt: {{ $ca.Cert | b64enc | quote }} +{{- end }} +{{- end }} +{{- if (include "minio.createTlsSecret" .) }} +{{- $ca := genCA "minio-ca" 365 }} +{{- $releaseNamespace := .Release.Namespace }} +{{- $clusterDomain := .Values.clusterDomain }} +{{- $fullname := include "common.names.fullname" . }} +{{- $serviceName := include "common.names.fullname" . }} +{{- $headlessServiceName := printf "%s-headless" (include "common.names.fullname" .) 
}} +{{- $altNames := list (printf "*.%s.%s.svc.%s" $serviceName $releaseNamespace $clusterDomain) (printf "%s.%s.svc.%s" $serviceName $releaseNamespace $clusterDomain) (printf "*.%s.%s.svc.%s" $headlessServiceName $releaseNamespace $clusterDomain) (printf "%s.%s.svc.%s" $headlessServiceName $releaseNamespace $clusterDomain) "127.0.0.1" "localhost" $fullname }} +{{- $crt := genSignedCert $fullname nil $altNames 365 $ca }} +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "common.names.fullname" . }}-crt + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: kubernetes.io/tls +data: + ca.crt: {{ $ca.Cert | b64enc | quote }} + tls.crt: {{ $crt.Cert | b64enc | quote }} + tls.key: {{ $crt.Key | b64enc | quote }} +{{- end }} diff --git a/charts/modules/minio/values.yaml b/charts/modules/minio/values.yaml new file mode 100644 index 0000000..85216fc --- /dev/null +++ b/charts/modules/minio/values.yaml @@ -0,0 +1,1156 @@ +## @section Global parameters +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry, imagePullSecrets and storageClass + +## @param global.imageRegistry Global Docker image registry +## @param global.imagePullSecrets Global Docker registry secret names as an array +## @param global.storageClass Global StorageClass for Persistent Volume(s) +## +global: + imageRegistry: "" + ## e.g. + ## imagePullSecrets: + ## - myRegistryKeySecretName + ## + imagePullSecrets: [] + storageClass: "" + +## @section Common parameters + +## @param nameOverride String to partially override common.names.fullname template (will maintain the release name) +## +nameOverride: "" +## @param fullnameOverride String to fully override common.names.fullname template +## +fullnameOverride: "" +## @param commonLabels Labels to add to all deployed objects +## +commonLabels: {} +## @param commonAnnotations Annotations to add to all deployed objects +## +commonAnnotations: {} +## @param kubeVersion Force target Kubernetes version (using Helm capabilities if not set) +## +kubeVersion: "" +## @param clusterDomain Default Kubernetes cluster domain +## +clusterDomain: cluster.local +## @param extraDeploy Array of extra objects to deploy with the release +## +extraDeploy: [] + +## @section MinIO® parameters + +## Bitnami MinIO® image version +## ref: https://hub.docker.com/r/bitnami/minio/tags/ +## @param image.registry MinIO® image registry +## @param image.repository MinIO® image repository +## @param image.tag MinIO® image tag (immutable tags are recommended) +## @param image.digest MinIO® image digest in the way sha256:aa.... 
Please note this parameter, if set, will override the tag +## @param image.pullPolicy Image pull policy +## @param image.pullSecrets Specify docker-registry secret names as an array +## @param image.debug Specify if debug logs should be enabled +## +image: + registry: docker.io + repository: bitnami/minio + tag: 2022.11.29-debian-11-r0 + digest: "" + ## Specify an imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: https://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## e.g: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## Set to true if you would like to see extra information in the logs + ## + debug: false +## Bitnami MinIO® Client image version +## ref: https://hub.docker.com/r/bitnami/minio-client/tags/ +## @param clientImage.registry MinIO® Client image registry +## @param clientImage.repository MinIO® Client image repository +## @param clientImage.tag MinIO® Client image tag (immutable tags are recommended) +## @param clientImage.digest MinIO® Client image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag +## +clientImage: + registry: docker.io + repository: bitnami/minio-client + tag: 2022.11.17-debian-11-r4 + digest: "" +## @param mode MinIO® server mode (`standalone` or `distributed`) +## ref: https://docs.minio.io/docs/distributed-minio-quickstart-guide +## +mode: standalone +## MinIO® authentication parameters +## +auth: + ## @param auth.rootUser MinIO® root username + ## + rootUser: admin + ## @param auth.rootPassword Password for MinIO® root user + ## + rootPassword: "" + ## @param auth.existingSecret Use existing secret for credentials details (`auth.rootUser` and `auth.rootPassword` will be ignored and picked up from this secret). The secret has to contain the keys `root-user` and `root-password`. + ## + existingSecret: "" + ## @param auth.forcePassword Force users to specify required passwords + ## + forcePassword: false + ## @param auth.useCredentialsFiles Mount credentials as files instead of using environment variables + ## + useCredentialsFiles: false + ## @param auth.forceNewKeys Force root credentials (user and password) to be reconfigured every time they change in the secrets + ## + forceNewKeys: false +## @param defaultBuckets Comma, semi-colon or space separated list of buckets to create at initialization (only in standalone mode) +## e.g: +## defaultBuckets: "my-bucket, my-second-bucket" +## +defaultBuckets: "" +## @param disableWebUI Disable MinIO® Web UI +## ref: https://github.com/minio/minio/tree/master/docs/config/#browser +## +disableWebUI: false +## Enable tls in front of MinIO® containers.
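For orientation, a minimal TLS override could look roughly like the sketch below; it only uses the `tls.*` keys documented in the following block, and `minio-tls` is a placeholder for a pre-created secret carrying `tls.crt`, `tls.key` and `ca.crt` (the keys the templates above mount):

tls:
  enabled: true
  existingSecret: minio-tls   # placeholder name for an existing kubernetes.io/tls secret
# for throwaway test installs, tls.autoGenerated: true can be used instead of an existing secret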
+## +tls: + ## @param tls.enabled Enable tls in front of the container + ## + enabled: false + ## @param tls.autoGenerated Generate automatically self-signed TLS certificates + ## + autoGenerated: false + ## @param tls.existingSecret Name of an existing secret holding the certificate information + ## + existingSecret: "" + ## @param tls.mountPath The mount path where the secret will be located + ## Custom mount path where the certificates will be located, if empty will default to /certs + mountPath: "" +## @param extraEnvVars Extra environment variables to be set on MinIO® container +## e.g: +## extraEnvVars: +## - name: FOO +## value: "bar" +## +extraEnvVars: [] +## @param extraEnvVarsCM ConfigMap with extra environment variables +## +extraEnvVarsCM: "" +## @param extraEnvVarsSecret Secret with extra environment variables +## +extraEnvVarsSecret: "" +## @param command Default container command (useful when using custom images). Use array form +## +command: [] +## @param args Default container args (useful when using custom images). Use array form +## +args: [] + +## @section MinIO® deployment/statefulset parameters + +## @param schedulerName Specifies the schedulerName, if it's nil uses kube-scheduler +## https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ +## +schedulerName: "" +## @param terminationGracePeriodSeconds In seconds, time the given to the MinIO pod needs to terminate gracefully +## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods +## +terminationGracePeriodSeconds: "" +## MinIO® deployment parameters +## Only when 'mode' is 'standalone' or 'gateway.enabled' is 'true' +## +deployment: + ## @param deployment.updateStrategy.type Deployment strategy type + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies + ## e.g: + ## updateStrategy: + ## type: RollingUpdate + ## rollingUpdate: + ## maxSurge: 25% + ## maxUnavailable: 25% + ## + updateStrategy: + type: Recreate +## MinIO® statefulset parameters +## Only when mode is 'distributed' +## +statefulset: + ## @param statefulset.updateStrategy.type StatefulSet strategy type + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies + ## e.g: + ## updateStrategy: + ## type: RollingUpdate + ## rollingUpdate: + ## maxSurge: 25% + ## maxUnavailable: 25% + ## + updateStrategy: + type: RollingUpdate + ## @param statefulset.podManagementPolicy StatefulSet controller supports relax its ordering guarantees while preserving its uniqueness and identity guarantees. There are two valid pod management policies: OrderedReady and Parallel + ## ref: https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#pod-management-policy + ## + podManagementPolicy: Parallel + ## @param statefulset.replicaCount Number of pods per zone (only for MinIO® distributed mode). 
Should be even and `>= 4` + ## + replicaCount: 4 + ## @param statefulset.zones Number of zones (only for MinIO® distributed mode) + ## + zones: 1 + ## @param statefulset.drivesPerNode Number of drives attached to every node (only for MinIO® distributed mode) + ## + drivesPerNode: 1 + +## MinIO® provisioning +## +provisioning: + ## @param provisioning.enabled Enable MinIO® provisioning Job + ## + enabled: false + ## @param provisioning.schedulerName Name of the k8s scheduler (other than default) for MinIO® provisioning + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + schedulerName: "" + ## @param provisioning.podAnnotations Provisioning Pod annotations. + ## + podAnnotations: {} + ## @param provisioning.command Default provisioning container command (useful when using custom images). Use array form + ## + command: [] + ## @param provisioning.args Default provisioning container args (useful when using custom images). Use array form + ## + args: [] + ## @param provisioning.extraCommands Optionally specify extra list of additional commands for MinIO® provisioning pod + ## + extraCommands: [] + ## @param provisioning.extraVolumes Optionally specify extra list of additional volumes for MinIO® provisioning pod + ## + extraVolumes: [] + ## @param provisioning.extraVolumeMounts Optionally specify extra list of additional volumeMounts for MinIO® provisioning container + ## + extraVolumeMounts: [] + ## We usually recommend not to specify default resources and to leave this as a conscious + ## choice for the user. This also increases chances charts run on environments with little + ## resources, such as Minikube. If you do want to specify resources, uncomment the following + ## lines, adjust them as necessary, and remove the curly braces after 'resources:'. + ## @param provisioning.resources.limits The resources limits for the container + ## @param provisioning.resources.requests The requested resources for the container + ## + resources: + ## Example: + ## limits: + ## cpu: 100m + ## memory: 64Mi + limits: {} + ## Examples: + ## requests: + ## cpu: 200m + ## memory: 128Mi + requests: {} + ## @param provisioning.policies MinIO® policies provisioning + ## https://docs.min.io/docs/minio-admin-complete-guide.html#policy + ## e.g. + ## policies: + ## - name: custom-bucket-specific-policy + ## statements: + ## - resources: + ## - "arn:aws:s3:::my-bucket" + ## actions: + ## - "s3:GetBucketLocation" + ## - "s3:ListBucket" + ## - "s3:ListBucketMultipartUploads" + ## - resources: + ## - "arn:aws:s3:::my-bucket/*" + ## # Allowed values: "Allow" | "Deny" + ## # Defaults to "Deny" if not specified + ## effect: "Allow" + ## actions: + ## - "s3:AbortMultipartUpload" + ## - "s3:DeleteObject" + ## - "s3:GetObject" + ## - "s3:ListMultipartUploadParts" + ## - "s3:PutObject" + policies: [] + ## @param provisioning.users MinIO® users provisioning. Can be used in addition to provisioning.usersExistingSecrets. + ## https://docs.min.io/docs/minio-admin-complete-guide.html#user + ## e.g. + ## users: + ## - username: test-username + ## password: test-password + ## disabled: false + ## policies: + ## - readwrite + ## - consoleAdmin + ## - diagnostics + ## # When set to true, it will replace all policies with the specified. + ## # When false, the policies will be added to the existing. + ## setPolicies: false + users: [] + ## @param provisioning.usersExistingSecrets Array if existing secrets containing MinIO® users to be provisioned. 
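As a rough sketch of how this is wired up (reusing the `centralized-minio-users` name from the example further below; the secret itself is assumed to already exist in the release namespace with the layout described in the following comments):

provisioning:
  enabled: true
  usersExistingSecrets:
    - centralized-minio-users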
Can be used in addition to provisioning.users. + ## https://docs.min.io/docs/minio-admin-complete-guide.html#user + ## + ## Instead of configuring users inside values.yaml, referring to existing Kubernetes secrets containing user + ## configurations is possible. + ## e.g. + ## usersExistingSecrets: + ## - centralized-minio-users + ## + ## All provided Kubernetes secrets require a specific data structure. The same data from the provisioning.users example above + ## can be defined via secrets with the following data structure. The secret keys have no meaning to the provisioning job except that + ## they are used as filenames. + ## ## apiVersion: v1 + ## ## kind: Secret + ## ## metadata: + ## ## name: centralized-minio-users + ## ## type: Opaque + ## ## stringData: + ## ## username1: | + ## ## username=test-username + ## ## password=test-password + ## ## disabled=false + ## ## policies=readwrite,consoleAdmin,diagnostics + ## ## setPolicies=false + usersExistingSecrets: [] + ## @param provisioning.groups MinIO® groups provisioning + ## https://docs.min.io/docs/minio-admin-complete-guide.html#group + ## e.g. + ## groups + ## - name: test-group + ## disabled: false + ## members: + ## - test-username + ## policies: + ## - readwrite + ## # When set to true, it will replace all policies with the specified. + ## # When false, the policies will be added to the existing. + ## setPolicies: false + groups: [] + ## @param provisioning.buckets MinIO® buckets, versioning, lifecycle, quota and tags provisioning + ## Buckets https://docs.min.io/docs/minio-client-complete-guide.html#mb + ## Lifecycle https://docs.min.io/docs/minio-client-complete-guide.html#ilm + ## Quotas https://docs.min.io/docs/minio-admin-complete-guide.html#bucket + ## Tags https://docs.min.io/docs/minio-client-complete-guide.html#tag + ## Versioning https://docs.min.io/docs/minio-client-complete-guide.html#version + ## e.g. + ## buckets: + ## - name: test-bucket + ## region: us-east-1 + ## # Only when mode is 'distributed' + ## # ref: https://docs.minio.io/docs/distributed-minio-quickstart-guide + ## versioning: false + ## # Versioning is automatically enabled if withLock is true + ## # ref: https://docs.min.io/docs/minio-bucket-versioning-guide.html + ## withLock: true + ## # Only when mode is 'distributed' + ## # ref: https://docs.minio.io/docs/distributed-minio-quickstart-guide + ## lifecycle: + ## - id: TestPrefix7dRetention + ## prefix: test-prefix + ## disabled: false + ## expiry: + ## days: 7 + ## # Days !OR! date + ## # date: "2021-11-11T00:00:00Z" + ## nonconcurrentDays: 3 + ## # Only when mode is 'distributed' + ## # ref: https://docs.minio.io/docs/distributed-minio-quickstart-guide + ## quota: + ## # hard or clear(+ omit size) + ## type: hard + ## size: 10GiB + ## tags: + ## key1: value1 + buckets: [] + ## @param provisioning.config MinIO® config provisioning + ## https://docs.min.io/docs/minio-server-configuration-guide.html + ## e.g. 
+ ## config: + ## - name: region + ## options: + ## name: us-east-1 + config: [] + ## MinIO® pod Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod + ## @param provisioning.podSecurityContext.enabled Enable pod Security Context + ## @param provisioning.podSecurityContext.fsGroup Group ID for the container + ## + podSecurityContext: + enabled: true + fsGroup: 1001 + ## MinIO® container Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container + ## @param provisioning.containerSecurityContext.enabled Enable container Security Context + ## @param provisioning.containerSecurityContext.runAsUser User ID for the container + ## @param provisioning.containerSecurityContext.runAsNonRoot Avoid running as root User + ## + containerSecurityContext: + enabled: true + runAsUser: 1001 + runAsNonRoot: true + +## @param hostAliases MinIO® pod host aliases +## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ +## +hostAliases: [] +## @param containerPorts.api MinIO® container port to open for MinIO® API +## @param containerPorts.console MinIO® container port to open for MinIO® Console +## +containerPorts: + api: 9000 + console: 9001 +## MinIO® pod Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod +## @param podSecurityContext.enabled Enable pod Security Context +## @param podSecurityContext.fsGroup Group ID for the container +## +podSecurityContext: + enabled: true + fsGroup: 1001 +## MinIO® container Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container +## @param containerSecurityContext.enabled Enable container Security Context +## @param containerSecurityContext.runAsUser User ID for the container +## @param containerSecurityContext.runAsNonRoot Avoid running as root User +## +containerSecurityContext: + enabled: true + runAsUser: 1001 + runAsNonRoot: true +## @param podLabels Extra labels for MinIO® pods +## Ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ +## +podLabels: {} +## @param podAnnotations Annotations for MinIO® pods +## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ +## +podAnnotations: {} +## @param podAffinityPreset Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` +## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity +## +podAffinityPreset: "" +## @param podAntiAffinityPreset Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` +## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity +## +podAntiAffinityPreset: soft +## Node affinity preset +## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity +## +nodeAffinityPreset: + ## @param nodeAffinityPreset.type Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` + ## + type: "" + ## @param nodeAffinityPreset.key Node label key to match. Ignored if `affinity` is set. + ## E.g. + ## key: "kubernetes.io/e2e-az-name" + ## + key: "" + ## @param nodeAffinityPreset.values Node label values to match. 
Ignored if `affinity` is set. + ## E.g. + ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: [] +## @param affinity Affinity for pod assignment. Evaluated as a template. +## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity +## Note: podAffinityPreset, podAntiAffinityPreset, and nodeAffinityPreset will be ignored when it's set +## +affinity: {} +## @param nodeSelector Node labels for pod assignment. Evaluated as a template. +## ref: https://kubernetes.io/docs/user-guide/node-selection/ +## +nodeSelector: {} +## @param tolerations Tolerations for pod assignment. Evaluated as a template. +## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ +## +tolerations: [] +## @param topologySpreadConstraints Topology Spread Constraints for MinIO® pods assignment spread across your cluster among failure-domains +## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#spread-constraints-for-pods +## +topologySpreadConstraints: [] +## @param priorityClassName MinIO® pods' priorityClassName +## +priorityClassName: "" +## MinIO® containers' resource requests and limits +## ref: https://kubernetes.io/docs/user-guide/compute-resources/ +## We usually recommend not to specify default resources and to leave this as a conscious +## choice for the user. This also increases chances charts run on environments with little +## resources, such as Minikube. If you do want to specify resources, uncomment the following +## lines, adjust them as necessary, and remove the curly braces after 'resources:'. +## @param resources.limits The resources limits for the MinIO® container +## @param resources.requests The requested resources for the MinIO® container +## +resources: + ## Example: + ## limits: + ## cpu: 250m + ## memory: 256Mi + limits: {} + ## Examples: + ## requests: + ## cpu: 250m + ## memory: 256Mi + requests: {} +## Configure extra options for liveness probe +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes +## @param livenessProbe.enabled Enable livenessProbe +## @param livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe +## @param livenessProbe.periodSeconds Period seconds for livenessProbe +## @param livenessProbe.timeoutSeconds Timeout seconds for livenessProbe +## @param livenessProbe.failureThreshold Failure threshold for livenessProbe +## @param livenessProbe.successThreshold Success threshold for livenessProbe +## +livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 +## Configure extra options for readiness probe +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes +## @param readinessProbe.enabled Enable readinessProbe +## @param readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe +## @param readinessProbe.periodSeconds Period seconds for readinessProbe +## @param readinessProbe.timeoutSeconds Timeout seconds for readinessProbe +## @param readinessProbe.failureThreshold Failure threshold for readinessProbe +## @param readinessProbe.successThreshold Success threshold for readinessProbe +## +readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 +## Configure extra options for startupProbe probe +## ref: 
https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes +## @param startupProbe.enabled Enable startupProbe +## @param startupProbe.initialDelaySeconds Initial delay seconds for startupProbe +## @param startupProbe.periodSeconds Period seconds for startupProbe +## @param startupProbe.timeoutSeconds Timeout seconds for startupProbe +## @param startupProbe.failureThreshold Failure threshold for startupProbe +## @param startupProbe.successThreshold Success threshold for startupProbe +## +startupProbe: + enabled: false + initialDelaySeconds: 0 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 60 +## @param customLivenessProbe Override default liveness probe +## +customLivenessProbe: {} +## @param customReadinessProbe Override default readiness probe +## +customReadinessProbe: {} +## @param customStartupProbe Override default startup probe +## +customStartupProbe: {} +## @param lifecycleHooks for the MinIO® container(s) to automate configuration before or after startup +## +lifecycleHooks: {} +## @param extraVolumes Optionally specify extra list of additional volumes for MinIO® pods +## +extraVolumes: [] +## @param extraVolumeMounts Optionally specify extra list of additional volumeMounts for MinIO® container(s) +## +extraVolumeMounts: [] +## @param initContainers Add additional init containers to the MinIO® pods +## e.g: +## initContainers: +## - name: your-image-name +## image: your-image +## imagePullPolicy: Always +## ports: +## - name: portname +## containerPort: 1234 +## +initContainers: [] +## @param sidecars Add additional sidecar containers to the MinIO® pods +## e.g: +## sidecars: +## - name: your-image-name +## image: your-image +## imagePullPolicy: Always +## ports: +## - name: portname +## containerPort: 1234 +## +sidecars: [] + +## @section Traffic exposure parameters + +## MinIO® Service properties +## +service: + ## @param service.type MinIO® service type + ## + type: ClusterIP + ## @param service.ports.api MinIO® API service port + ## @param service.ports.console MinIO® Console service port + ## + ports: + api: 9000 + console: 9001 + ## @param service.nodePorts.api Specify the MinIO® API nodePort value for the LoadBalancer and NodePort service types + ## @param service.nodePorts.console Specify the MinIO® Console nodePort value for the LoadBalancer and NodePort service types + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + nodePorts: + api: "" + console: "" + ## @param service.clusterIP Service Cluster IP + ## e.g.: + ## clusterIP: None + ## + clusterIP: "" + ## @param service.loadBalancerIP loadBalancerIP if service type is `LoadBalancer` (optional, cloud specific) + ## ref: https://kubernetes.io/docs/user-guide/services/#type-loadbalancer + ## + loadBalancerIP: "" + ## @param service.loadBalancerSourceRanges Addresses that are allowed when service is LoadBalancer + ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## e.g: + ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## @param service.externalTrafficPolicy Enable client source IP preservation + ## ref https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Cluster + ## @param service.extraPorts Extra ports to expose in the service (normally used with the `sidecar` 
value)
+  ##
+  extraPorts: []
+  ## @param service.annotations Annotations for MinIO® service
+  ## This can be used to set the LoadBalancer service type to internal only.
+  ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
+  ##
+  annotations: {}
+## Configure the ingress resource that allows you to access the
+## MinIO® Console. Set up the URL
+## ref: https://kubernetes.io/docs/user-guide/ingress/
+##
+ingress:
+  ## @param ingress.enabled Enable ingress controller resource for MinIO Console
+  ##
+  enabled: false
+  ## @param ingress.apiVersion Force Ingress API version (automatically detected if not set)
+  ##
+  apiVersion: ""
+  ## @param ingress.ingressClassName IngressClass that will be used to implement the Ingress (Kubernetes 1.18+)
+  ## This is supported in Kubernetes 1.18+ and required if you have more than one IngressClass marked as the default for your cluster.
+  ## ref: https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/
+  ##
+  ingressClassName: ""
+  ## @param ingress.hostname Default host for the ingress resource
+  ##
+  hostname: minio.local
+  ## @param ingress.path The Path to MinIO®. You may need to set this to '/*' in order to use this with ALB ingress controllers.
+  ##
+  path: /
+  ## @param ingress.pathType Ingress path type
+  ##
+  pathType: ImplementationSpecific
+  ## @param ingress.servicePort Service port to be used
+  ## Default is http. Alternative is https.
+  ##
+  servicePort: minio-console
+  ## @param ingress.annotations Additional annotations for the Ingress resource. To enable certificate autogeneration, place here your cert-manager annotations.
+  ## For a full list of possible ingress annotations, please see
+  ## ref: https://github.com/kubernetes/ingress-nginx/blob/master/docs/user-guide/nginx-configuration/annotations.md
+  ## Use this parameter to set the required annotations for cert-manager, see
+  ## ref: https://cert-manager.io/docs/usage/ingress/#supported-annotations
+  ##
+  ## e.g:
+  ## annotations:
+  ##   kubernetes.io/ingress.class: nginx
+  ##   cert-manager.io/cluster-issuer: cluster-issuer-name
+  ##
+  annotations: {}
+  ## @param ingress.tls Enable TLS configuration for the hostname defined at `ingress.hostname` parameter
+  ## TLS certificates will be retrieved from a TLS secret with name: `{{- printf "%s-tls" .Values.ingress.hostname }}`
+  ## You can:
+  ##   - Use the `ingress.secrets` parameter to create this TLS secret
+  ##   - Rely on cert-manager to create it by setting the corresponding annotations
+  ##   - Rely on Helm to create self-signed certificates by setting `ingress.selfSigned=true`
+  ##
+  tls: false
+  ## @param ingress.selfSigned Create a TLS secret for this ingress record using self-signed certificates generated by Helm
+  ##
+  selfSigned: false
+  ## @param ingress.extraHosts The list of additional hostnames to be covered with this ingress record.
+  ## Most likely the hostname above will be enough, but in the event more hosts are needed, this is an array
+  ## e.g:
+  ## extraHosts:
+  ##   - name: minio.local
+  ##     path: /
+  ##
+  extraHosts: []
+  ## @param ingress.extraPaths Any additional paths that may need to be added to the ingress under the main host
+  ## For example: The ALB ingress controller requires a special rule for handling SSL redirection.
+  ## extraPaths:
+  ## - path: /*
+  ##   backend:
+  ##     serviceName: ssl-redirect
+  ##     servicePort: use-annotation
+  ##
+  extraPaths: []
+  ## @param ingress.extraTls The tls configuration for additional hostnames to be covered with this ingress record.
+  ## see: https://kubernetes.io/docs/concepts/services-networking/ingress/#tls
+  ## e.g:
+  ## extraTls:
+  ## - hosts:
+  ##     - minio.local
+  ##   secretName: minio.local-tls
+  ##
+  extraTls: []
+  ## @param ingress.secrets If you're providing your own certificates, please use this to add the certificates as secrets
+  ## key and certificate are expected in PEM format
+  ## name should line up with a secretName set further up
+  ##
+  ## If it is not set and you're using cert-manager, this is unneeded, as it will create a secret for you with valid certificates
+  ## If it is not set and you're NOT using cert-manager either, self-signed certificates will be created valid for 365 days
+  ## It is also possible to create and manage the certificates outside of this helm chart
+  ## Please see README.md for more information
+  ##
+  ## Example
+  ## secrets:
+  ##   - name: minio.local-tls
+  ##     key: ""
+  ##     certificate: ""
+  ##
+  secrets: []
+  ## @param ingress.extraRules Additional rules to be covered with this ingress record
+  ## ref: https://kubernetes.io/docs/concepts/services-networking/ingress/#ingress-rules
+  ## e.g:
+  ## extraRules:
+  ## - host: example.local
+  ##   http:
+  ##     path: /
+  ##     backend:
+  ##       service:
+  ##         name: example-svc
+  ##         port:
+  ##           name: http
+  ##
+  extraRules: []
+
+## Configure the ingress resource that allows you to access the
+## MinIO® API. Set up the URL
+## ref: https://kubernetes.io/docs/user-guide/ingress/
+##
+apiIngress:
+  ## @param apiIngress.enabled Enable ingress controller resource for MinIO API
+  ##
+  enabled: false
+  ## @param apiIngress.apiVersion Force Ingress API version (automatically detected if not set)
+  ##
+  apiVersion: ""
+  ## @param apiIngress.ingressClassName IngressClass that will be used to implement the Ingress (Kubernetes 1.18+)
+  ## This is supported in Kubernetes 1.18+ and required if you have more than one IngressClass marked as the default for your cluster.
+  ## ref: https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/
+  ##
+  ingressClassName: ""
+  ## @param apiIngress.hostname Default host for the ingress resource
+  ##
+  hostname: minio.local
+  ## @param apiIngress.path The Path to MinIO®. You may need to set this to '/*' in order to use this with ALB ingress controllers.
+  ##
+  path: /
+  ## @param apiIngress.pathType Ingress path type
+  ##
+  pathType: ImplementationSpecific
+  ## @param apiIngress.servicePort Service port to be used
+  ## Default is http. Alternative is https.
+  ##
+  servicePort: minio-api
+  ## @param apiIngress.annotations Additional annotations for the Ingress resource. To enable certificate autogeneration, place here your cert-manager annotations.
+  ## For a full list of possible ingress annotations, please see
+  ## ref: https://github.com/kubernetes/ingress-nginx/blob/master/docs/user-guide/nginx-configuration/annotations.md
+  ## Use this parameter to set the required annotations for cert-manager, see
+  ## ref: https://cert-manager.io/docs/usage/ingress/#supported-annotations
+  ##
+  ## e.g:
+  ## annotations:
+  ##   kubernetes.io/ingress.class: nginx
+  ##   cert-manager.io/cluster-issuer: cluster-issuer-name
+  ##
+  annotations: {}
+  ## @param apiIngress.tls Enable TLS configuration for the hostname defined at `apiIngress.hostname` parameter
+  ## TLS certificates will be retrieved from a TLS secret with name: `{{- printf "%s-tls" .Values.apiIngress.hostname }}`
+  ## You can:
+  ##   - Use the `apiIngress.secrets` parameter to create this TLS secret
+  ##   - Rely on cert-manager to create it by setting the corresponding annotations
+  ##   - Rely on Helm to create self-signed certificates by setting `apiIngress.selfSigned=true`
+  ##
+  tls: false
+  ## @param apiIngress.selfSigned Create a TLS secret for this ingress record using self-signed certificates generated by Helm
+  ##
+  selfSigned: false
+  ## @param apiIngress.extraHosts The list of additional hostnames to be covered with this ingress record.
+  ## Most likely the hostname above will be enough, but in the event more hosts are needed, this is an array
+  ## e.g:
+  ## extraHosts:
+  ##   - name: minio.local
+  ##     path: /
+  ##
+  extraHosts: []
+  ## @param apiIngress.extraPaths Any additional paths that may need to be added to the ingress under the main host
+  ## For example: The ALB ingress controller requires a special rule for handling SSL redirection.
+  ## extraPaths:
+  ## - path: /*
+  ##   backend:
+  ##     serviceName: ssl-redirect
+  ##     servicePort: use-annotation
+  ##
+  extraPaths: []
+  ## @param apiIngress.extraTls The tls configuration for additional hostnames to be covered with this ingress record.
+ ## see: https://kubernetes.io/docs/concepts/services-networking/ingress/#tls + ## e.g: + ## extraTls: + ## - hosts: + ## - minio.local + ## secretName: minio.local-tls + ## + extraTls: [] + ## @param apiIngress.secrets If you're providing your own certificates, please use this to add the certificates as secrets + ## key and certificate are expected in PEM format + ## name should line up with a secretName set further up + ## + ## If it is not set and you're using cert-manager, this is unneeded, as it will create a secret for you with valid certificates + ## If it is not set and you're NOT using cert-manager either, self-signed certificates will be created valid for 365 days + ## It is also possible to create and manage the certificates outside of this helm chart + ## Please see README.md for more information + ## + ## Example + ## secrets: + ## - name: minio.local-tls + ## key: "" + ## certificate: "" + ## + secrets: [] + ## @param apiIngress.extraRules Additional rules to be covered with this ingress record + ## ref: https://kubernetes.io/docs/concepts/services-networking/ingress/#ingress-rules + ## e.g: + ## extraRules: + ## - host: example.local + ## http: + ## path: / + ## backend: + ## service: + ## name: example-svc + ## port: + ## name: http + ## + extraRules: [] +## NetworkPolicy parameters +## +networkPolicy: + ## @param networkPolicy.enabled Enable the default NetworkPolicy policy + ## + enabled: false + ## @param networkPolicy.allowExternal Don't require client label for connections + ## When set to false, only pods with the correct client label will have network access to the port MinIO® is + ## listening on. When true, MinIO® will accept connections from any source (with the correct destination port). + ## + allowExternal: true + ## @param networkPolicy.extraFromClauses Allows to add extra 'from' clauses to the NetworkPolicy + extraFromClauses: {} + ## Example + ## extraFromClauses: + ## - podSelector: + ## matchLabels: + ## a: b + +## @section Persistence parameters + +## Enable persistence using Persistent Volume Claims +## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/ +## +persistence: + ## @param persistence.enabled Enable MinIO® data persistence using PVC. If false, use emptyDir + ## + enabled: true + ## @param persistence.storageClass PVC Storage Class for MinIO® data volume + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + storageClass: "" + ## @param persistence.mountPath Data volume mount path + ## + mountPath: /data + ## @param persistence.accessModes PVC Access Modes for MinIO® data volume + ## + accessModes: + - ReadWriteOnce + ## @param persistence.size PVC Storage Request for MinIO® data volume + ## + size: 8Gi + ## @param persistence.annotations Annotations for the PVC + ## + annotations: {} + ## @param persistence.existingClaim Name of an existing PVC to use (only in `standalone` mode) + ## + existingClaim: "" + +## @section Volume Permissions parameters + +## Init containers parameters: +## volumePermissions: Change the owner and group of the persistent volume mountpoint to runAsUser:fsGroup values from the securityContext section. 
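+## e.g., if the storage class provisions volumes owned by root so MinIO® (running as UID 1001)
+## cannot write to them, a minimal illustrative override enabling the init container would be:
+##   volumePermissions:
+##     enabled: true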
+## +volumePermissions: + ## @param volumePermissions.enabled Enable init container that changes the owner and group of the persistent volume(s) mountpoint to `runAsUser:fsGroup` + ## + enabled: false + ## @param volumePermissions.image.registry Init container volume-permissions image registry + ## @param volumePermissions.image.repository Init container volume-permissions image repository + ## @param volumePermissions.image.tag Init container volume-permissions image tag (immutable tags are recommended) + ## @param volumePermissions.image.digest Init container volume-permissions image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag + ## @param volumePermissions.image.pullPolicy Init container volume-permissions image pull policy + ## @param volumePermissions.image.pullSecrets Specify docker-registry secret names as an array + ## + image: + registry: docker.io + repository: bitnami/bitnami-shell + tag: 11-debian-11-r57 + digest: "" + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## e.g: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## Init container' resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## We usually recommend not to specify default resources and to leave this as a conscious + ## choice for the user. This also increases chances charts run on environments with little + ## resources, such as Minikube. If you do want to specify resources, uncomment the following + ## lines, adjust them as necessary, and remove the curly braces after 'resources:'. + ## @param volumePermissions.resources.limits Init container volume-permissions resource limits + ## @param volumePermissions.resources.requests Init container volume-permissions resource requests + ## + resources: + ## Example: + ## limits: + ## cpu: 500m + ## memory: 1Gi + limits: {} + requests: {} + ## Init container' Security Context + ## Note: the chown of the data folder is done to containerSecurityContext.runAsUser + ## and not the below volumePermissions.containerSecurityContext.runAsUser + ## @param volumePermissions.containerSecurityContext.runAsUser User ID for the init container + ## + containerSecurityContext: + runAsUser: 0 + +## @section RBAC parameters + +## Specifies whether a ServiceAccount should be created +## +serviceAccount: + ## @param serviceAccount.create Enable the creation of a ServiceAccount for MinIO® pods + ## + create: true + ## @param serviceAccount.name Name of the created ServiceAccount + ## If not set and create is true, a name is generated using the common.names.fullname template + ## + name: "" + ## @param serviceAccount.automountServiceAccountToken Enable/disable auto mounting of the service account token + ## + automountServiceAccountToken: true + ## @param serviceAccount.annotations Custom annotations for MinIO® ServiceAccount + ## + annotations: {} + +## @section Other parameters + +## MinIO® Pod Disruption Budget configuration in distributed mode. +## If MinIO® Gateway is enabled, creates a Pod Disruption Budget for the Gateway instead (mutually exclusive with distributed mode). 
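+## e.g., an illustrative override for a 4-replica distributed deployment, allowing at most one
+## pod to be evicted at a time during node drains (values here are assumptions, not defaults):
+##   pdb:
+##     create: true
+##     minAvailable: 3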
+## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/ +## +pdb: + ## @param pdb.create Enable/disable a Pod Disruption Budget creation + ## + create: false + ## @param pdb.minAvailable Minimum number/percentage of pods that must still be available after the eviction + ## + minAvailable: 1 + ## @param pdb.maxUnavailable Maximum number/percentage of pods that may be made unavailable after the eviction + ## + maxUnavailable: "" + +## @section Metrics parameters + +metrics: + ## @param metrics.prometheusAuthType Authentication mode for Prometheus (`jwt` or `public`) + ## To allow public access without authentication for prometheus metrics set environment as follows. + ## + prometheusAuthType: public + ## Prometheus Operator ServiceMonitor configuration + ## + serviceMonitor: + ## @param metrics.serviceMonitor.enabled If the operator is installed in your cluster, set to true to create a Service Monitor Entry + ## + enabled: false + ## @param metrics.serviceMonitor.namespace Namespace which Prometheus is running in + ## + namespace: "" + ## @param metrics.serviceMonitor.labels Extra labels for the ServiceMonitor + ## + labels: {} + ## @param metrics.serviceMonitor.jobLabel The name of the label on the target service to use as the job name in Prometheus + ## + jobLabel: "" + ## @param metrics.serviceMonitor.path HTTP path to scrape for metrics + ## + path: /minio/v2/metrics/cluster + ## @param metrics.serviceMonitor.interval Interval at which metrics should be scraped + ## + interval: 30s + ## @param metrics.serviceMonitor.scrapeTimeout Specify the timeout after which the scrape is ended + ## e.g: + ## scrapeTimeout: 30s + scrapeTimeout: "" + ## @param metrics.serviceMonitor.metricRelabelings MetricRelabelConfigs to apply to samples before ingestion + ## + metricRelabelings: [] + ## @param metrics.serviceMonitor.relabelings Metrics relabelings to add to the scrape endpoint, applied before scraping + ## + relabelings: [] + ## @param metrics.serviceMonitor.honorLabels Specify honorLabels parameter to add the scrape endpoint + ## + honorLabels: false + ## @param metrics.serviceMonitor.selector Prometheus instance selector labels + ## ref: https://github.com/bitnami/charts/tree/main/bitnami/prometheus-operator#prometheus-configuration + ## + selector: {} + ## @param metrics.serviceMonitor.apiVersion ApiVersion for the serviceMonitor Resource (defaults to "monitoring.coreos.com/v1") + apiVersion: "" + + ## Prometheus Operator PrometheusRule configuration + ## + prometheusRule: + ## @param metrics.prometheusRule.enabled Create a Prometheus Operator PrometheusRule (also requires `metrics.enabled` to be `true` and `metrics.prometheusRule.rules`) + ## + enabled: false + ## @param metrics.prometheusRule.namespace Namespace for the PrometheusRule Resource (defaults to the Release Namespace) + ## + namespace: "" + ## @param metrics.prometheusRule.additionalLabels Additional labels that can be used so PrometheusRule will be discovered by Prometheus + ## + additionalLabels: {} + ## @param metrics.prometheusRule.rules Prometheus Rule definitions + # - alert: minio cluster nodes offline + # annotations: + # summary: "minio cluster nodes offline" + # description: "minio cluster nodes offline, pod {{`{{`}} $labels.pod {{`}}`}} service {{`{{`}} $labels.job {{`}}`}} offline" + # for: 10m + # expr: minio_cluster_nodes_offline_total > 0 + # labels: + # severity: critical + # group: PaaS + ## + rules: [] + +## @section Gateway parameters + +gateway: + ## @param gateway.enabled Use MinIO® as Gateway 
for other storage systems + ## + enabled: false + ## @param gateway.type Gateway type. Supported types are: `azure`, `gcs`, `nas`, `s3` + ## ref: https://docs.minio.io/docs/minio-gateway-for-azure + ## ref: https://docs.minio.io/docs/minio-gateway-for-gcs + ## ref: https://docs.minio.io/docs/minio-gateway-for-nas + ## ref: https://docs.minio.io/docs/minio-gateway-for-s3 + ## + type: s3 + ## @param gateway.replicaCount Number of MinIO® Gateway replicas + ## + replicaCount: 4 + ## @param gateway.updateStrategy.type Update strategy type for MinIO® Gateway replicas + updateStrategy: + type: Recreate + ## Autoscaling configuration for MinIO® Gateway. overrides gateway.replicaCount if enabled + ## @param gateway.autoscaling.enabled Enable autoscaling for MinIO® Gateway deployment + ## @param gateway.autoscaling.minReplicas Minimum number of replicas to scale back + ## @param gateway.autoscaling.maxReplicas Maximum number of replicas to scale out + ## @param gateway.autoscaling.targetCPU Target CPU utilization percentage + ## @param gateway.autoscaling.targetMemory Target Memory utilization percentage + autoscaling: + enabled: false + minReplicas: "4" + maxReplicas: "4" + targetCPU: "" + targetMemory: "" + ## @param gateway.priorityClassName Pod priority class name for MinIO® Gateway + ## + priorityClassName: "" + ## Gateway authentication configuration + ## + auth: + ## Authentication configuration for Azure. Ignored unless type=azure + ## @param gateway.auth.azure.accessKey Access key to access MinIO® using Azure Gateway + ## @param gateway.auth.azure.secretKey Secret key to access MinIO® using Azure Gateway + ## @param gateway.auth.azure.serviceEndpoint Azure Blob Storage custom endpoint + ## @param gateway.auth.azure.storageAccountName Azure Storage Account Name to use to access Azure Blob Storage + ## @param gateway.auth.azure.storageAccountKey Azure Storage Account Key to use to access Azure Blob Storage + ## @param gateway.auth.azure.storageAccountNameExistingSecret Existing Secret name to extract Azure Storage Account Name from to access Azure Blob Storage + ## @param gateway.auth.azure.storageAccountNameExistingSecretKey Existing Secret key to extract Azure Storage Account Name from to use to access Azure Blob Storage + ## @param gateway.auth.azure.storageAccountKeyExistingSecret Existing Secret name to extract Azure Storage Account Key from to access Azure Blob Storage + ## @param gateway.auth.azure.storageAccountKeyExistingSecretKey Existing Secret key to extract Azure Storage Account Key from to use to access Azure Blob Storage + ## + azure: + accessKey: "" + secretKey: "" + serviceEndpoint: "" + storageAccountName: "" + storageAccountKey: "" + storageAccountNameExistingSecret: "" + storageAccountNameExistingSecretKey: "" + storageAccountKeyExistingSecret: "" + storageAccountKeyExistingSecretKey: "" + ## Authentication configuration for GCS. Ignored unless type=gcs + ## @param gateway.auth.gcs.accessKey Access key to access MinIO® using GCS Gateway + ## @param gateway.auth.gcs.secretKey Secret key to access MinIO® using GCS Gateway + ## @param gateway.auth.gcs.keyJSON Service Account key to access GCS + ## @param gateway.auth.gcs.projectID GCP Project ID to use + ## + gcs: + accessKey: "" + secretKey: "" + keyJSON: "" + projectID: "" + ## Authentication configuration for NAS. 
Ignored unless type=nas + ## @param gateway.auth.nas.accessKey Access key to access MinIO® using NAS Gateway + ## @param gateway.auth.nas.secretKey Secret key to access MinIO® using NAS Gateway + ## + nas: + accessKey: "" + secretKey: "" + ## Authentication configuration for S3. Ignored unless type=s3 + ## @param gateway.auth.s3.accessKey Access key to use to access AWS S3 + ## @param gateway.auth.s3.secretKey Secret key to use to access AWS S3 + ## @param gateway.auth.s3.serviceEndpoint AWS S3 endpoint + ## + s3: + accessKey: "" + secretKey: "" + serviceEndpoint: https://s3.amazonaws.com diff --git a/charts/modules/mkt/test/test.yaml b/charts/modules/mkt/test/test.yaml new file mode 100644 index 0000000..70145d7 --- /dev/null +++ b/charts/modules/mkt/test/test.yaml @@ -0,0 +1,23 @@ +--- +kind: Service +apiVersion: v1 +metadata: + name: yourls-server-out + namespace: vika-mkt + labels: + app: yourls-server + app.kubernetes.io/name: yourls-server + app.kubernetes.io/version: v1 + version: v1 +spec: + ports: + - name: tcp-80 + protocol: TCP + port: 80 + targetPort: 80 + selector: + app: yourls-server + app.kubernetes.io/name: yourls-server + app.kubernetes.io/version: v1 + type: LoadBalancer + sessionAffinity: None \ No newline at end of file diff --git a/charts/modules/mkt/yourls-server/.helmignore b/charts/modules/mkt/yourls-server/.helmignore new file mode 100644 index 0000000..0e8a0eb --- /dev/null +++ b/charts/modules/mkt/yourls-server/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/charts/modules/mkt/yourls-server/Chart.yaml b/charts/modules/mkt/yourls-server/Chart.yaml new file mode 100644 index 0000000..d4c802f --- /dev/null +++ b/charts/modules/mkt/yourls-server/Chart.yaml @@ -0,0 +1,6 @@ +apiVersion: v2 +name: yourls-server +description: yourls-server +type: application +version: 1.1.0 +appVersion: "1.1.0" diff --git a/charts/modules/mkt/yourls-server/templates/tests/test-connection.yaml b/charts/modules/mkt/yourls-server/templates/tests/test-connection.yaml new file mode 100644 index 0000000..2cc0357 --- /dev/null +++ b/charts/modules/mkt/yourls-server/templates/tests/test-connection.yaml @@ -0,0 +1,15 @@ +{{/*apiVersion: v1*/}} +{{/*kind: Pod*/}} +{{/*metadata:*/}} +{{/* name: "{{ include "yourls.fullname" . }}-test-connection"*/}} +{{/* labels:*/}} +{{/* {{- include "yourls.labels" . | nindent 4 }}*/}} +{{/* annotations:*/}} +{{/* "helm.sh/hook": test*/}} +{{/*spec:*/}} +{{/* containers:*/}} +{{/* - name: wget*/}} +{{/* image: busybox*/}} +{{/* command: ['wget']*/}} +{{/* args: ['{{ include "yourls.fullname" . 
}}:{{ .Values.service.port }}']*/}} +{{/* restartPolicy: Never*/}} diff --git a/charts/modules/mkt/yourls-server/templates/yourls.yaml b/charts/modules/mkt/yourls-server/templates/yourls.yaml new file mode 100644 index 0000000..5bf40c8 --- /dev/null +++ b/charts/modules/mkt/yourls-server/templates/yourls.yaml @@ -0,0 +1,144 @@ +--- +kind: Service +apiVersion: v1 +metadata: + name: yourls-server + namespace: {{.Values.namespace}} + labels: + app: yourls-server + app.kubernetes.io/name: yourls-server + app.kubernetes.io/version: v1 + version: v1 +spec: + ports: + - name: tcp-80 + protocol: TCP + port: 80 + targetPort: 80 + selector: + app: yourls-server + app.kubernetes.io/name: yourls-server + app.kubernetes.io/version: v1 + type: ClusterIP + sessionAffinity: None +--- +kind: Deployment +apiVersion: apps/v1 +metadata: + name: yourls-server + namespace: {{.Values.namespace}} + labels: + app: yourls-server + app.kubernetes.io/name: yourls-server + app.kubernetes.io/version: v1 + version: v1 + annotations: + deployment.kubernetes.io/revision: '27' + kubesphere.io/creator: xiechuanjun + servicemesh.kubesphere.io/enabled: 'false' +spec: + replicas: 1 + selector: + matchLabels: + app: yourls-server + app.kubernetes.io/name: yourls-server + app.kubernetes.io/version: v1 + version: v1 + template: + metadata: + labels: + app: yourls-server + app.kubernetes.io/name: yourls-server + app.kubernetes.io/version: v1 + version: v1 + annotations: + kubesphere.io/restartedAt: '2021-10-22T11:25:22.892Z' + logging.kubesphere.io/logsidecar-config: '{}' + sidecar.istio.io/inject: 'false' + spec: + containers: + - name: yourls-server + image: 'docker.vika.ltd/vikadata/vika/yourls:1.7.6-ldap.1' + ports: + - name: tcp-80 + containerPort: 80 + protocol: TCP + env: + - name: YOURLS_DB_HOST + value: {{.Values.serviceVariable.YOURLS_DB_HOST}} + - name: YOURLS_DB_USER + value: {{.Values.serviceVariable.YOURLS_DB_USER}} + - name: YOURLS_DB_PASS + value: {{.Values.serviceVariable.YOURLS_DB_PASS}} + - name: YOURLS_SITE + value: {{.Values.serviceVariable.YOURLS_SITE}} + - name: YOURLS_USER + value: {{.Values.serviceVariable.YOURLS_USER}} + - name: YOURLS_PASS + value: {{.Values.serviceVariable.YOURLS_PASS}} + - name: YOURLS_LANG + value: {{.Values.serviceVariable.YOURLS_LANG}} + resources: + requests: + cpu: 100m + memory: 128Mi + limits: + cpu: 400m + memory: 256Mi + lifecycle: + postStart: + exec: + command: + - /bin/bash + - '-c' + - > + set -ex + + i=1 + + while [ $i -le 10 ] + + do + if [ ! 
-f "/var/www/html/user/config.php" ];then + sleep 3 + let i++ + else + break + fi + done + + cat >>/var/www/html/user/config.php << EOF + + define('LDAPAUTH_HOST', '{{.Values.serviceVariable.LDAPAUTH_HOST}}' ); + + define('LDAPAUTH_PORT', '{{.Values.serviceVariable.LDAPAUTH_PORT}}' ); + + define('LDAPAUTH_BASE', '{{.Values.serviceVariable.LDAPAUTH_BASE}}' ); + + define('LDAPAUTH_USERNAME_FIELD', '{{.Values.serviceVariable.LDAPAUTH_USERNAME_FIELD}}'); + + define('LDAPAUTH_SEARCH_USER', '{{.Values.serviceVariable.LDAPAUTH_SEARCH_USER}}' ); + + define('LDAPAUTH_SEARCH_PASS', '{{.Values.serviceVariable.LDAPAUTH_SEARCH_PASS}}'); + + EOF + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + imagePullPolicy: IfNotPresent + securityContext: {} + restartPolicy: Always + terminationGracePeriodSeconds: 30 + dnsPolicy: ClusterFirst + serviceAccountName: default + securityContext: {} + imagePullSecrets: + - name: regcred + affinity: {} + schedulerName: default-scheduler + strategy: + type: RollingUpdate + rollingUpdate: + maxUnavailable: 0 + maxSurge: 1 + revisionHistoryLimit: 10 + progressDeadlineSeconds: 600 diff --git a/charts/modules/mkt/yourls-server/values.yaml b/charts/modules/mkt/yourls-server/values.yaml new file mode 100644 index 0000000..4eb5b4e --- /dev/null +++ b/charts/modules/mkt/yourls-server/values.yaml @@ -0,0 +1,15 @@ +namespace: vika-playground +serviceVariable: + YOURLS_DB_HOST: xxx + YOURLS_DB_USER: vika + YOURLS_DB_PASS: xxx + YOURLS_SITE: 'http://xxxx' + YOURLS_USER: xxx + YOURLS_PASS: xxx + YOURLS_LANG: zh_CN + LDAPAUTH_HOST: ldap://ldap.authing.cn + LDAPAUTH_PORT: 389 + LDAPAUTH_BASE: ou=users,o=xxx,dc=authing,dc=cn + LDAPAUTH_USERNAME_FIELD: email + LDAPAUTH_SEARCH_USER: ou=users,o=xxx,dc=authing,dc=cn + LDAPAUTH_SEARCH_PASS: xxx \ No newline at end of file diff --git a/charts/modules/mysql/.helmignore b/charts/modules/mysql/.helmignore new file mode 100644 index 0000000..f0c1319 --- /dev/null +++ b/charts/modules/mysql/.helmignore @@ -0,0 +1,21 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/charts/modules/mysql/Chart.lock b/charts/modules/mysql/Chart.lock new file mode 100644 index 0000000..f54e771 --- /dev/null +++ b/charts/modules/mysql/Chart.lock @@ -0,0 +1,6 @@ +dependencies: +- name: common + repository: https://charts.bitnami.com/bitnami + version: 2.1.2 +digest: sha256:1c365a4551a2f4098e9584dc176b289c10437c679c7c3e2ec6153cabf863e1a4 +generated: "2022-11-08T17:35:30.280445479Z" diff --git a/charts/modules/mysql/Chart.yaml b/charts/modules/mysql/Chart.yaml new file mode 100644 index 0000000..ae3b68b --- /dev/null +++ b/charts/modules/mysql/Chart.yaml @@ -0,0 +1,28 @@ +annotations: + category: Database +apiVersion: v2 +appVersion: 8.0.31 +dependencies: +- name: common + repository: https://charts.bitnami.com/bitnami + tags: + - bitnami-common + version: 2.x.x +description: MySQL is a fast, reliable, scalable, and easy to use open source relational + database system. Designed to handle mission-critical, heavy-load production applications. 
+home: https://github.com/bitnami/charts/tree/main/bitnami/mysql +icon: https://bitnami.com/assets/stacks/mysql/img/mysql-stack-220x234.png +keywords: +- mysql +- database +- sql +- cluster +- high availability +maintainers: +- name: Bitnami + url: https://github.com/bitnami/charts +name: mysql +sources: +- https://github.com/bitnami/containers/tree/main/bitnami/mysql +- https://mysql.com +version: 9.4.4 diff --git a/charts/modules/mysql/README.md b/charts/modules/mysql/README.md new file mode 100644 index 0000000..b20e828 --- /dev/null +++ b/charts/modules/mysql/README.md @@ -0,0 +1,554 @@ + + +# MySQL packaged by Bitnami + +MySQL is a fast, reliable, scalable, and easy to use open source relational database system. Designed to handle mission-critical, heavy-load production applications. + +[Overview of MySQL](http://www.mysql.com) + +Trademarks: This software listing is packaged by Bitnami. The respective trademarks mentioned in the offering are owned by the respective companies, and use of them does not imply any affiliation or endorsement. + +## TL;DR + +```bash +$ helm repo add my-repo https://charts.bitnami.com/bitnami +$ helm install my-release my-repo/mysql +``` + +## Introduction + +This chart bootstraps a [MySQL](https://github.com/bitnami/containers/tree/main/bitnami/mysql) replication cluster deployment on a [Kubernetes](https://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +Bitnami charts can be used with [Kubeapps](https://kubeapps.dev/) for deployment and management of Helm Charts in clusters. + +## Prerequisites + +- Kubernetes 1.19+ +- Helm 3.2.0+ +- PV provisioner support in the underlying infrastructure + +## Installing the Chart + +To install the chart with the release name `my-release`: + +```bash +$ helm repo add my-repo https://charts.bitnami.com/bitnami +$ helm install my-release my-repo/mysql +``` + +These commands deploy MySQL on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation. + +> **Tip**: List all releases using `helm list` + +## Uninstalling the Chart + +To uninstall/delete the `my-release` deployment: + +```bash +$ helm delete my-release +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. + +## Parameters + +### Global parameters + +| Name | Description | Value | +| ------------------------- | ----------------------------------------------- | ----- | +| `global.imageRegistry` | Global Docker image registry | `""` | +| `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]` | +| `global.storageClass` | Global StorageClass for Persistent Volume(s) | `""` | + + +### Common parameters + +| Name | Description | Value | +| ------------------------ | --------------------------------------------------------------------------------------------------------- | --------------- | +| `kubeVersion` | Force target Kubernetes version (using Helm capabilities if not set) | `""` | +| `nameOverride` | String to partially override common.names.fullname template (will maintain the release name) | `""` | +| `fullnameOverride` | String to fully override common.names.fullname template | `""` | +| `namespaceOverride` | String to fully override common.names.namespace | `""` | +| `clusterDomain` | Cluster domain | `cluster.local` | +| `commonAnnotations` | Common annotations to add to all MySQL resources (sub-charts are not considered). 
Evaluated as a template | `{}` | +| `commonLabels` | Common labels to add to all MySQL resources (sub-charts are not considered). Evaluated as a template | `{}` | +| `extraDeploy` | Array with extra yaml to deploy with the chart. Evaluated as a template | `[]` | +| `diagnosticMode.enabled` | Enable diagnostic mode (all probes will be disabled and the command will be overridden) | `false` | +| `diagnosticMode.command` | Command to override all containers in the deployment | `["sleep"]` | +| `diagnosticMode.args` | Args to override all containers in the deployment | `["infinity"]` | + + +### MySQL common parameters + +| Name | Description | Value | +| -------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------- | +| `image.registry` | MySQL image registry | `docker.io` | +| `image.repository` | MySQL image repository | `bitnami/mysql` | +| `image.tag` | MySQL image tag (immutable tags are recommended) | `8.0.31-debian-11-r10` | +| `image.digest` | MySQL image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` | +| `image.pullPolicy` | MySQL image pull policy | `IfNotPresent` | +| `image.pullSecrets` | Specify docker-registry secret names as an array | `[]` | +| `image.debug` | Specify if debug logs should be enabled | `false` | +| `architecture` | MySQL architecture (`standalone` or `replication`) | `standalone` | +| `auth.rootPassword` | Password for the `root` user. Ignored if existing secret is provided | `""` | +| `auth.createDatabase` | Wheter to create the .Values.auth.database or not | `true` | +| `auth.database` | Name for a custom database to create | `my_database` | +| `auth.username` | Name for a custom user to create | `""` | +| `auth.password` | Password for the new user. Ignored if existing secret is provided | `""` | +| `auth.replicationUser` | MySQL replication user | `replicator` | +| `auth.replicationPassword` | MySQL replication user password. Ignored if existing secret is provided | `""` | +| `auth.existingSecret` | Use existing secret for password details. The secret has to contain the keys `mysql-root-password`, `mysql-replication-password` and `mysql-password` | `""` | +| `auth.usePasswordFiles` | Mount credentials as files instead of using an environment variable | `false` | +| `auth.customPasswordFiles` | Use custom password files when `auth.usePasswordFiles` is set to `true`. Define path for keys `root` and `user`, also define `replicator` if `architecture` is set to `replication` | `{}` | +| `initdbScripts` | Dictionary of initdb scripts | `{}` | +| `initdbScriptsConfigMap` | ConfigMap with the initdb scripts (Note: Overrides `initdbScripts`) | `""` | + + +### MySQL Primary parameters + +| Name | Description | Value | +| ----------------------------------------------- | --------------------------------------------------------------------------------------------------------------- | ------------------- | +| `primary.name` | Name of the primary database (eg primary, master, leader, ...) 
| `primary` | +| `primary.command` | Override default container command on MySQL Primary container(s) (useful when using custom images) | `[]` | +| `primary.args` | Override default container args on MySQL Primary container(s) (useful when using custom images) | `[]` | +| `primary.lifecycleHooks` | for the MySQL Primary container(s) to automate configuration before or after startup | `{}` | +| `primary.hostAliases` | Deployment pod host aliases | `[]` | +| `primary.configuration` | Configure MySQL Primary with a custom my.cnf file | `""` | +| `primary.existingConfigmap` | Name of existing ConfigMap with MySQL Primary configuration. | `""` | +| `primary.updateStrategy.type` | Update strategy type for the MySQL primary statefulset | `RollingUpdate` | +| `primary.podAnnotations` | Additional pod annotations for MySQL primary pods | `{}` | +| `primary.podAffinityPreset` | MySQL primary pod affinity preset. Ignored if `primary.affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `primary.podAntiAffinityPreset` | MySQL primary pod anti-affinity preset. Ignored if `primary.affinity` is set. Allowed values: `soft` or `hard` | `soft` | +| `primary.nodeAffinityPreset.type` | MySQL primary node affinity preset type. Ignored if `primary.affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `primary.nodeAffinityPreset.key` | MySQL primary node label key to match Ignored if `primary.affinity` is set. | `""` | +| `primary.nodeAffinityPreset.values` | MySQL primary node label values to match. Ignored if `primary.affinity` is set. | `[]` | +| `primary.affinity` | Affinity for MySQL primary pods assignment | `{}` | +| `primary.nodeSelector` | Node labels for MySQL primary pods assignment | `{}` | +| `primary.tolerations` | Tolerations for MySQL primary pods assignment | `[]` | +| `primary.priorityClassName` | MySQL primary pods' priorityClassName | `""` | +| `primary.runtimeClassName` | MySQL primary pods' runtimeClassName | `""` | +| `primary.schedulerName` | Name of the k8s scheduler (other than default) | `""` | +| `primary.terminationGracePeriodSeconds` | In seconds, time the given to the MySQL primary pod needs to terminate gracefully | `""` | +| `primary.topologySpreadConstraints` | Topology Spread Constraints for pod assignment | `[]` | +| `primary.podManagementPolicy` | podManagementPolicy to manage scaling operation of MySQL primary pods | `""` | +| `primary.podSecurityContext.enabled` | Enable security context for MySQL primary pods | `true` | +| `primary.podSecurityContext.fsGroup` | Group ID for the mounted volumes' filesystem | `1001` | +| `primary.containerSecurityContext.enabled` | MySQL primary container securityContext | `true` | +| `primary.containerSecurityContext.runAsUser` | User ID for the MySQL primary container | `1001` | +| `primary.containerSecurityContext.runAsNonRoot` | Set MySQL primary container's Security Context runAsNonRoot | `true` | +| `primary.resources.limits` | The resources limits for MySQL primary containers | `{}` | +| `primary.resources.requests` | The requested resources for MySQL primary containers | `{}` | +| `primary.livenessProbe.enabled` | Enable livenessProbe | `true` | +| `primary.livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `5` | +| `primary.livenessProbe.periodSeconds` | Period seconds for livenessProbe | `10` | +| `primary.livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `1` | +| `primary.livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `3` | +| 
`primary.livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` | +| `primary.readinessProbe.enabled` | Enable readinessProbe | `true` | +| `primary.readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `5` | +| `primary.readinessProbe.periodSeconds` | Period seconds for readinessProbe | `10` | +| `primary.readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `1` | +| `primary.readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `3` | +| `primary.readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` | +| `primary.startupProbe.enabled` | Enable startupProbe | `true` | +| `primary.startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `15` | +| `primary.startupProbe.periodSeconds` | Period seconds for startupProbe | `10` | +| `primary.startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `1` | +| `primary.startupProbe.failureThreshold` | Failure threshold for startupProbe | `10` | +| `primary.startupProbe.successThreshold` | Success threshold for startupProbe | `1` | +| `primary.customLivenessProbe` | Override default liveness probe for MySQL primary containers | `{}` | +| `primary.customReadinessProbe` | Override default readiness probe for MySQL primary containers | `{}` | +| `primary.customStartupProbe` | Override default startup probe for MySQL primary containers | `{}` | +| `primary.extraFlags` | MySQL primary additional command line flags | `""` | +| `primary.extraEnvVars` | Extra environment variables to be set on MySQL primary containers | `[]` | +| `primary.extraEnvVarsCM` | Name of existing ConfigMap containing extra env vars for MySQL primary containers | `""` | +| `primary.extraEnvVarsSecret` | Name of existing Secret containing extra env vars for MySQL primary containers | `""` | +| `primary.persistence.enabled` | Enable persistence on MySQL primary replicas using a `PersistentVolumeClaim`. 
If false, use emptyDir | `true` | +| `primary.persistence.existingClaim` | Name of an existing `PersistentVolumeClaim` for MySQL primary replicas | `""` | +| `primary.persistence.subPath` | The name of a volume's sub path to mount for persistence | `""` | +| `primary.persistence.storageClass` | MySQL primary persistent volume storage Class | `""` | +| `primary.persistence.annotations` | MySQL primary persistent volume claim annotations | `{}` | +| `primary.persistence.accessModes` | MySQL primary persistent volume access Modes | `["ReadWriteOnce"]` | +| `primary.persistence.size` | MySQL primary persistent volume size | `8Gi` | +| `primary.persistence.selector` | Selector to match an existing Persistent Volume | `{}` | +| `primary.extraVolumes` | Optionally specify extra list of additional volumes to the MySQL Primary pod(s) | `[]` | +| `primary.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the MySQL Primary container(s) | `[]` | +| `primary.initContainers` | Add additional init containers for the MySQL Primary pod(s) | `[]` | +| `primary.sidecars` | Add additional sidecar containers for the MySQL Primary pod(s) | `[]` | +| `primary.service.type` | MySQL Primary K8s service type | `ClusterIP` | +| `primary.service.ports.mysql` | MySQL Primary K8s service port | `3306` | +| `primary.service.nodePorts.mysql` | MySQL Primary K8s service node port | `""` | +| `primary.service.clusterIP` | MySQL Primary K8s service clusterIP IP | `""` | +| `primary.service.loadBalancerIP` | MySQL Primary loadBalancerIP if service type is `LoadBalancer` | `""` | +| `primary.service.externalTrafficPolicy` | Enable client source IP preservation | `Cluster` | +| `primary.service.loadBalancerSourceRanges` | Addresses that are allowed when MySQL Primary service is LoadBalancer | `[]` | +| `primary.service.extraPorts` | Extra ports to expose (normally used with the `sidecar` value) | `[]` | +| `primary.service.annotations` | Additional custom annotations for MySQL primary service | `{}` | +| `primary.service.sessionAffinity` | Session Affinity for Kubernetes service, can be "None" or "ClientIP" | `None` | +| `primary.service.sessionAffinityConfig` | Additional settings for the sessionAffinity | `{}` | +| `primary.service.headless.annotations` | Additional custom annotations for headless MySQL primary service. | `{}` | +| `primary.pdb.create` | Enable/disable a Pod Disruption Budget creation for MySQL primary pods | `false` | +| `primary.pdb.minAvailable` | Minimum number/percentage of MySQL primary pods that should remain scheduled | `1` | +| `primary.pdb.maxUnavailable` | Maximum number/percentage of MySQL primary pods that may be made unavailable | `""` | +| `primary.podLabels` | MySQL Primary pod label. If labels are same as commonLabels , this will take precedence | `{}` | + + +### MySQL Secondary parameters + +| Name | Description | Value | +| ------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------- | ------------------- | +| `secondary.name` | Name of the secondary database (eg secondary, slave, ...) 
| `secondary` | +| `secondary.replicaCount` | Number of MySQL secondary replicas | `1` | +| `secondary.hostAliases` | Deployment pod host aliases | `[]` | +| `secondary.command` | Override default container command on MySQL Secondary container(s) (useful when using custom images) | `[]` | +| `secondary.args` | Override default container args on MySQL Secondary container(s) (useful when using custom images) | `[]` | +| `secondary.lifecycleHooks` | for the MySQL Secondary container(s) to automate configuration before or after startup | `{}` | +| `secondary.configuration` | Configure MySQL Secondary with a custom my.cnf file | `""` | +| `secondary.existingConfigmap` | Name of existing ConfigMap with MySQL Secondary configuration. | `""` | +| `secondary.updateStrategy.type` | Update strategy type for the MySQL secondary statefulset | `RollingUpdate` | +| `secondary.podAnnotations` | Additional pod annotations for MySQL secondary pods | `{}` | +| `secondary.podAffinityPreset` | MySQL secondary pod affinity preset. Ignored if `secondary.affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `secondary.podAntiAffinityPreset` | MySQL secondary pod anti-affinity preset. Ignored if `secondary.affinity` is set. Allowed values: `soft` or `hard` | `soft` | +| `secondary.nodeAffinityPreset.type` | MySQL secondary node affinity preset type. Ignored if `secondary.affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `secondary.nodeAffinityPreset.key` | MySQL secondary node label key to match Ignored if `secondary.affinity` is set. | `""` | +| `secondary.nodeAffinityPreset.values` | MySQL secondary node label values to match. Ignored if `secondary.affinity` is set. | `[]` | +| `secondary.affinity` | Affinity for MySQL secondary pods assignment | `{}` | +| `secondary.nodeSelector` | Node labels for MySQL secondary pods assignment | `{}` | +| `secondary.tolerations` | Tolerations for MySQL secondary pods assignment | `[]` | +| `secondary.priorityClassName` | MySQL secondary pods' priorityClassName | `""` | +| `secondary.runtimeClassName` | MySQL secondary pods' runtimeClassName | `""` | +| `secondary.schedulerName` | Name of the k8s scheduler (other than default) | `""` | +| `secondary.terminationGracePeriodSeconds` | In seconds, time the given to the MySQL secondary pod needs to terminate gracefully | `""` | +| `secondary.topologySpreadConstraints` | Topology Spread Constraints for pod assignment | `[]` | +| `secondary.podManagementPolicy` | podManagementPolicy to manage scaling operation of MySQL secondary pods | `""` | +| `secondary.podSecurityContext.enabled` | Enable security context for MySQL secondary pods | `true` | +| `secondary.podSecurityContext.fsGroup` | Group ID for the mounted volumes' filesystem | `1001` | +| `secondary.containerSecurityContext.enabled` | MySQL secondary container securityContext | `true` | +| `secondary.containerSecurityContext.runAsUser` | User ID for the MySQL secondary container | `1001` | +| `secondary.containerSecurityContext.runAsNonRoot` | Set MySQL secondary container's Security Context runAsNonRoot | `true` | +| `secondary.resources.limits` | The resources limits for MySQL secondary containers | `{}` | +| `secondary.resources.requests` | The requested resources for MySQL secondary containers | `{}` | +| `secondary.livenessProbe.enabled` | Enable livenessProbe | `true` | +| `secondary.livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `5` | +| `secondary.livenessProbe.periodSeconds` | Period seconds for livenessProbe | 
`10` | +| `secondary.livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `1` | +| `secondary.livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `3` | +| `secondary.livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` | +| `secondary.readinessProbe.enabled` | Enable readinessProbe | `true` | +| `secondary.readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `5` | +| `secondary.readinessProbe.periodSeconds` | Period seconds for readinessProbe | `10` | +| `secondary.readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `1` | +| `secondary.readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `3` | +| `secondary.readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` | +| `secondary.startupProbe.enabled` | Enable startupProbe | `true` | +| `secondary.startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `15` | +| `secondary.startupProbe.periodSeconds` | Period seconds for startupProbe | `10` | +| `secondary.startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `1` | +| `secondary.startupProbe.failureThreshold` | Failure threshold for startupProbe | `15` | +| `secondary.startupProbe.successThreshold` | Success threshold for startupProbe | `1` | +| `secondary.customLivenessProbe` | Override default liveness probe for MySQL secondary containers | `{}` | +| `secondary.customReadinessProbe` | Override default readiness probe for MySQL secondary containers | `{}` | +| `secondary.customStartupProbe` | Override default startup probe for MySQL secondary containers | `{}` | +| `secondary.extraFlags` | MySQL secondary additional command line flags | `""` | +| `secondary.extraEnvVars` | An array to add extra environment variables on MySQL secondary containers | `[]` | +| `secondary.extraEnvVarsCM` | Name of existing ConfigMap containing extra env vars for MySQL secondary containers | `""` | +| `secondary.extraEnvVarsSecret` | Name of existing Secret containing extra env vars for MySQL secondary containers | `""` | +| `secondary.persistence.enabled` | Enable persistence on MySQL secondary replicas using a `PersistentVolumeClaim` | `true` | +| `secondary.persistence.existingClaim` | Name of an existing `PersistentVolumeClaim` for MySQL secondary replicas | `""` | +| `secondary.persistence.subPath` | The name of a volume's sub path to mount for persistence | `""` | +| `secondary.persistence.storageClass` | MySQL secondary persistent volume storage Class | `""` | +| `secondary.persistence.annotations` | MySQL secondary persistent volume claim annotations | `{}` | +| `secondary.persistence.accessModes` | MySQL secondary persistent volume access Modes | `["ReadWriteOnce"]` | +| `secondary.persistence.size` | MySQL secondary persistent volume size | `8Gi` | +| `secondary.persistence.selector` | Selector to match an existing Persistent Volume | `{}` | +| `secondary.extraVolumes` | Optionally specify extra list of additional volumes to the MySQL secondary pod(s) | `[]` | +| `secondary.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the MySQL secondary container(s) | `[]` | +| `secondary.initContainers` | Add additional init containers for the MySQL secondary pod(s) | `[]` | +| `secondary.sidecars` | Add additional sidecar containers for the MySQL secondary pod(s) | `[]` | +| `secondary.service.type` | MySQL secondary Kubernetes service type | `ClusterIP` | +| `secondary.service.ports.mysql` | MySQL 
secondary Kubernetes service port | `3306` | +| `secondary.service.nodePorts.mysql` | MySQL secondary Kubernetes service node port | `""` | +| `secondary.service.clusterIP` | MySQL secondary Kubernetes service clusterIP IP | `""` | +| `secondary.service.loadBalancerIP` | MySQL secondary loadBalancerIP if service type is `LoadBalancer` | `""` | +| `secondary.service.externalTrafficPolicy` | Enable client source IP preservation | `Cluster` | +| `secondary.service.loadBalancerSourceRanges` | Addresses that are allowed when MySQL secondary service is LoadBalancer | `[]` | +| `secondary.service.extraPorts` | Extra ports to expose (normally used with the `sidecar` value) | `[]` | +| `secondary.service.annotations` | Additional custom annotations for MySQL secondary service | `{}` | +| `secondary.service.sessionAffinity` | Session Affinity for Kubernetes service, can be "None" or "ClientIP" | `None` | +| `secondary.service.sessionAffinityConfig` | Additional settings for the sessionAffinity | `{}` | +| `secondary.service.headless.annotations` | Additional custom annotations for headless MySQL secondary service. | `{}` | +| `secondary.pdb.create` | Enable/disable a Pod Disruption Budget creation for MySQL secondary pods | `false` | +| `secondary.pdb.minAvailable` | Minimum number/percentage of MySQL secondary pods that should remain scheduled | `1` | +| `secondary.pdb.maxUnavailable` | Maximum number/percentage of MySQL secondary pods that may be made unavailable | `""` | +| `secondary.podLabels` | Additional pod labels for MySQL secondary pods | `{}` | + + +### RBAC parameters + +| Name | Description | Value | +| --------------------------------------------- | -------------------------------------------------------------- | ------- | +| `serviceAccount.create` | Enable the creation of a ServiceAccount for MySQL pods | `true` | +| `serviceAccount.name` | Name of the created ServiceAccount | `""` | +| `serviceAccount.annotations` | Annotations for MySQL Service Account | `{}` | +| `serviceAccount.automountServiceAccountToken` | Automount service account token for the server service account | `true` | +| `rbac.create` | Whether to create & use RBAC resources or not | `false` | +| `rbac.rules` | Custom RBAC rules to set | `[]` | + + +### Network Policy + +| Name | Description | Value | +| ------------------------------------------ | --------------------------------------------------------------------------------------------------------------- | ------- | +| `networkPolicy.enabled` | Enable creation of NetworkPolicy resources | `false` | +| `networkPolicy.allowExternal` | The Policy model to apply. 
| `true` | +| `networkPolicy.explicitNamespacesSelector` | A Kubernetes LabelSelector to explicitly select namespaces from which ingress traffic could be allowed to MySQL | `{}` | + + +### Volume Permissions parameters + +| Name | Description | Value | +| ------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------- | ----------------------- | +| `volumePermissions.enabled` | Enable init container that changes the owner and group of the persistent volume(s) mountpoint to `runAsUser:fsGroup` | `false` | +| `volumePermissions.image.registry` | Init container volume-permissions image registry | `docker.io` | +| `volumePermissions.image.repository` | Init container volume-permissions image repository | `bitnami/bitnami-shell` | +| `volumePermissions.image.tag` | Init container volume-permissions image tag (immutable tags are recommended) | `11-debian-11-r50` | +| `volumePermissions.image.digest` | Init container volume-permissions image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` | +| `volumePermissions.image.pullPolicy` | Init container volume-permissions image pull policy | `IfNotPresent` | +| `volumePermissions.image.pullSecrets` | Specify docker-registry secret names as an array | `[]` | +| `volumePermissions.resources` | Init container volume-permissions resources | `{}` | + + +### Metrics parameters + +| Name | Description | Value | +| -------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------ | ------------------------- | +| `metrics.enabled` | Start a side-car prometheus exporter | `false` | +| `metrics.image.registry` | Exporter image registry | `docker.io` | +| `metrics.image.repository` | Exporter image repository | `bitnami/mysqld-exporter` | +| `metrics.image.tag` | Exporter image tag (immutable tags are recommended) | `0.14.0-debian-11-r55` | +| `metrics.image.digest` | Exporter image digest in the way sha256:aa.... 
Please note this parameter, if set, will override the tag | `""` | +| `metrics.image.pullPolicy` | Exporter image pull policy | `IfNotPresent` | +| `metrics.image.pullSecrets` | Specify docker-registry secret names as an array | `[]` | +| `metrics.service.type` | Kubernetes service type for MySQL Prometheus Exporter | `ClusterIP` | +| `metrics.service.port` | MySQL Prometheus Exporter service port | `9104` | +| `metrics.service.annotations` | Prometheus exporter service annotations | `{}` | +| `metrics.extraArgs.primary` | Extra args to be passed to mysqld_exporter on Primary pods | `[]` | +| `metrics.extraArgs.secondary` | Extra args to be passed to mysqld_exporter on Secondary pods | `[]` | +| `metrics.resources.limits` | The resources limits for MySQL prometheus exporter containers | `{}` | +| `metrics.resources.requests` | The requested resources for MySQL prometheus exporter containers | `{}` | +| `metrics.livenessProbe.enabled` | Enable livenessProbe | `true` | +| `metrics.livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `120` | +| `metrics.livenessProbe.periodSeconds` | Period seconds for livenessProbe | `10` | +| `metrics.livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `1` | +| `metrics.livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `3` | +| `metrics.livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` | +| `metrics.readinessProbe.enabled` | Enable readinessProbe | `true` | +| `metrics.readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `30` | +| `metrics.readinessProbe.periodSeconds` | Period seconds for readinessProbe | `10` | +| `metrics.readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `1` | +| `metrics.readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `3` | +| `metrics.readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` | +| `metrics.serviceMonitor.enabled` | Create ServiceMonitor Resource for scraping metrics using PrometheusOperator | `false` | +| `metrics.serviceMonitor.namespace` | Specify the namespace in which the serviceMonitor resource will be created | `""` | +| `metrics.serviceMonitor.jobLabel` | The name of the label on the target service to use as the job name in prometheus. 
| `""` | +| `metrics.serviceMonitor.interval` | Specify the interval at which metrics should be scraped | `30s` | +| `metrics.serviceMonitor.scrapeTimeout` | Specify the timeout after which the scrape is ended | `""` | +| `metrics.serviceMonitor.relabelings` | RelabelConfigs to apply to samples before scraping | `[]` | +| `metrics.serviceMonitor.metricRelabelings` | MetricRelabelConfigs to apply to samples before ingestion | `[]` | +| `metrics.serviceMonitor.selector` | ServiceMonitor selector labels | `{}` | +| `metrics.serviceMonitor.honorLabels` | Specify honorLabels parameter to add the scrape endpoint | `false` | +| `metrics.serviceMonitor.labels` | Used to pass Labels that are used by the Prometheus installed in your cluster to select Service Monitors to work with | `{}` | +| `metrics.serviceMonitor.annotations` | ServiceMonitor annotations | `{}` | +| `metrics.prometheusRule.enabled` | Creates a Prometheus Operator prometheusRule (also requires `metrics.enabled` to be `true` and `metrics.prometheusRule.rules`) | `false` | +| `metrics.prometheusRule.namespace` | Namespace for the prometheusRule Resource (defaults to the Release Namespace) | `""` | +| `metrics.prometheusRule.additionalLabels` | Additional labels that can be used so prometheusRule will be discovered by Prometheus | `{}` | +| `metrics.prometheusRule.rules` | Prometheus Rule definitions | `[]` | + + +The above parameters map to the env variables defined in [bitnami/mysql](https://github.com/bitnami/containers/tree/main/bitnami/mysql). For more information please refer to the [bitnami/mysql](https://github.com/bitnami/containers/tree/main/bitnami/mysql) image documentation. + +Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example, + +```bash +$ helm install my-release \ + --set auth.rootPassword=secretpassword,auth.database=app_database \ + my-repo/mysql +``` + +The above command sets the MySQL `root` account password to `secretpassword`. Additionally it creates a database named `app_database`. + +> NOTE: Once this chart is deployed, it is not possible to change the application's access credentials, such as usernames or passwords, using Helm. To change these application credentials after deployment, delete any persistent volumes (PVs) used by the chart and re-deploy it, or use the application's built-in administrative tools if available. + +Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example, + +```bash +$ helm install my-release -f values.yaml my-repo/mysql +``` + +> **Tip**: You can use the default [values.yaml](values.yaml) + +## Configuration and installation details + +### [Rolling VS Immutable tags](https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/) + +It is strongly recommended to use immutable tags in a production environment. This ensures your deployment does not change automatically if the same tag is updated with a different image. + +Bitnami will release a new chart updating its containers if a new version of the main container, significant changes, or critical vulnerabilities exist. + +### Use a different MySQL version + +To modify the application version used in this chart, specify a different version of the image using the `image.tag` parameter and/or a different repository using the `image.repository` parameter. 
Refer to the [chart documentation for more information on these parameters and how to use them with images from a private registry](https://docs.bitnami.com/kubernetes/infrastructure/mysql/configuration/change-image-version/).
+
+### Customize a new MySQL instance
+
+The [Bitnami MySQL](https://github.com/bitnami/containers/tree/main/bitnami/mysql) image allows you to use your custom scripts to initialize a fresh instance. Custom scripts may be specified using the `initdbScripts` parameter. Alternatively, an external ConfigMap may be created with all the initialization scripts and passed to the chart via the `initdbScriptsConfigMap` parameter. Note that this will override the `initdbScripts` parameter.
+
+The allowed extensions are `.sh`, `.sql` and `.sql.gz`.
+
+These scripts are treated differently depending on their extension. While `.sh` scripts are executed on all the nodes, `.sql` and `.sql.gz` scripts are only executed on the primary nodes. This is because `.sh` scripts support conditional tests to identify the type of node they are running on, while such tests are not supported in `.sql` or `.sql.gz` files.
+
+Refer to the [chart documentation for more information and a usage example](http://docs.bitnami.com/kubernetes/infrastructure/mysql/configuration/customize-new-instance/).
+
+### Sidecars and Init Containers
+
+If you need additional containers to run within the same pod as MySQL, you can do so via the `sidecars` config parameter. Simply define your container according to the Kubernetes container spec.
+
+```yaml
+sidecars:
+  - name: your-image-name
+    image: your-image
+    imagePullPolicy: Always
+    ports:
+      - name: portname
+        containerPort: 1234
+```
+
+Similarly, you can add extra init containers using the `initContainers` parameter.
+
+```yaml
+initContainers:
+  - name: your-image-name
+    image: your-image
+    imagePullPolicy: Always
+    ports:
+      - name: portname
+        containerPort: 1234
+```
+
+## Persistence
+
+The [Bitnami MySQL](https://github.com/bitnami/containers/tree/main/bitnami/mysql) image stores the MySQL data and configurations at the `/bitnami/mysql` path of the container.
+
+The chart mounts a [Persistent Volume](https://kubernetes.io/docs/concepts/storage/persistent-volumes/) at this location. The volume is created using dynamic volume provisioning by default. An existing PersistentVolumeClaim can also be defined for this purpose.
+
+If you encounter errors when working with persistent volumes, refer to our [troubleshooting guide for persistent volumes](https://docs.bitnami.com/kubernetes/faq/troubleshooting/troubleshooting-persistence-volumes/).
+
+## Network Policy
+
+To enable network policy for MySQL, install [a networking plugin that implements the Kubernetes NetworkPolicy spec](https://kubernetes.io/docs/tasks/administer-cluster/declare-network-policy#before-you-begin), and set `networkPolicy.enabled` to `true`.
+
+For Kubernetes v1.5 & v1.6, you must also turn on NetworkPolicy by setting the DefaultDeny namespace annotation. Note: this will enforce policy for _all_ pods in the namespace:
+
+```console
+$ kubectl annotate namespace default "net.beta.kubernetes.io/network-policy={\"ingress\":{\"isolation\":\"DefaultDeny\"}}"
+```
+
+With NetworkPolicy enabled, traffic will be limited to just port 3306.
+
+For a more precise policy, set `networkPolicy.allowExternal=false`. This will only allow pods with the generated client label to connect to MySQL.
+This label will be displayed in the output of a successful install.
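+
+As a minimal sketch based only on the `networkPolicy.*` parameters listed in the table above (the `mysql-access: "true"` namespace label is just an illustrative placeholder), a `values.yaml` fragment restricting access might look like this:
+
+```yaml
+networkPolicy:
+  enabled: true
+  # Only pods carrying the generated client label can reach MySQL on port 3306
+  allowExternal: false
+  # Optionally also allow ingress from namespaces matching this LabelSelector
+  explicitNamespacesSelector:
+    matchLabels:
+      mysql-access: "true"
+```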
+
+## Pod affinity
+
+This chart allows you to set your custom affinity using the `XXX.affinity` parameter(s). Find more information about Pod affinity in the [Kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity).
+
+As an alternative, you can use the preset configurations for pod affinity, pod anti-affinity, and node affinity available at the [bitnami/common](https://github.com/bitnami/charts/tree/main/bitnami/common#affinities) chart. To do so, set the `XXX.podAffinityPreset`, `XXX.podAntiAffinityPreset`, or `XXX.nodeAffinityPreset` parameters.
+
+## Troubleshooting
+
+Find more information about how to deal with common errors related to Bitnami's Helm charts in [this troubleshooting guide](https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues).
+
+## Upgrading
+
+It's necessary to set the `auth.rootPassword` parameter when upgrading for the readiness/liveness probes to work properly. When you install this chart for the first time, some notes will be displayed providing the credentials you must use under the 'Administrator credentials' section. Please note down the password and run the command below to upgrade your chart:
+
+```bash
+$ helm upgrade my-release my-repo/mysql --set auth.rootPassword=[ROOT_PASSWORD]
+```
+
+| Note: you need to substitute the placeholder _[ROOT_PASSWORD]_ with the value obtained in the installation notes.
+
+### To 9.0.0
+
+This major release renames several values in this chart and adds missing features, in order to be aligned with the rest of the assets in the Bitnami charts repository.
+
+Affected values:
+
+- `schedulerName` was renamed as `primary.schedulerName` and `secondary.schedulerName`.
+- The way passwords are handled has been refactored and the `auth.forcePassword` value has been removed. The password configuration now has the following priority:
+  1. Search for an already existing 'Secret' resource and reuse the previous password.
+  2. Password provided via the values.yaml.
+  3. If no secret exists and no password was provided, the bitnami/mysql chart sets a randomly generated password.
+- `primary.service.port` was renamed as `primary.service.ports.mysql`.
+- `secondary.service.port` was renamed as `secondary.service.ports.mysql`.
+- `primary.service.nodePort` was renamed as `primary.service.nodePorts.mysql`.
+- `secondary.service.nodePort` was renamed as `secondary.service.nodePorts.mysql`.
+- `primary.updateStrategy` and `secondary.updateStrategy` are now interpreted as an object and not a string.
+- The values `primary.rollingUpdatePartition` and `secondary.rollingUpdatePartition` have been removed. In cases where they are needed, they can be set inside `.*updateStrategy`.
+- `primary.pdb.enabled` was renamed as `primary.pdb.create`.
+- `secondary.pdb.enabled` was renamed as `secondary.pdb.create`.
+- `metrics.serviceMonitor.additionalLabels` was renamed as `metrics.serviceMonitor.labels`.
+- `metrics.serviceMonitor.relabellings` was removed; it was previously used to configure the `metricRelabelings` field. Two new values were introduced, `metrics.serviceMonitor.relabelings` and `metrics.serviceMonitor.metricRelabelings`, which can be used to configure the serviceMonitor fields of the same name.
+
+### To 8.0.0
+
+- Several parameters were renamed or removed in favor of new ones in this major version:
+  - The terms *master* and *slave* have been replaced by the terms *primary* and *secondary*. 
Therefore, parameters prefixed with `master` or `slave` are now prefixed with `primary` or `secondary`, respectively.
+  - Credentials parameters are reorganized under the `auth` parameter.
+  - The `replication.enabled` parameter is deprecated in favor of the `architecture` parameter, which accepts two values: `standalone` and `replication`.
+- Chart labels were adapted to follow the [Helm charts standard labels](https://helm.sh/docs/chart_best_practices/labels/#standard-labels).
+- This version also introduces `bitnami/common`, a [library chart](https://helm.sh/docs/topics/library_charts/#helm), as a dependency. More documentation about this new utility can be found [here](https://github.com/bitnami/charts/tree/main/bitnami/common#bitnami-common-library-chart). Please make sure that you have updated the chart dependencies before executing any upgrade.
+
+Consequences:
+
+- Backwards compatibility is not guaranteed. To upgrade to `8.0.0`, install a new release of the MySQL chart, and migrate the data from your previous release. You have two alternatives to do so:
+  - Create a backup of the database, and restore it on the new release using tools such as [mysqldump](https://dev.mysql.com/doc/refman/8.0/en/mysqldump.html).
+  - Reuse the PVC used to hold the master data on your previous release. To do so, use the `primary.persistence.existingClaim` parameter. The following example assumes that the release name is `mysql`:
+
+```bash
+$ helm install mysql my-repo/mysql --set auth.rootPassword=[ROOT_PASSWORD] --set primary.persistence.existingClaim=[EXISTING_PVC]
+```
+
+| Note: you need to substitute the placeholder _[EXISTING_PVC]_ with the name of the PVC used on your previous release, and _[ROOT_PASSWORD]_ with the root password used in your previous release.
+
+### To 7.0.0
+
+[On November 13, 2020, Helm v2 support formally ended](https://github.com/helm/charts#status-of-the-project). This major version is the result of the required changes applied to the Helm Chart to be able to incorporate the different features added in Helm v3 and to be consistent with the Helm project itself regarding the Helm v2 EOL.
+
+[Learn more about this change and related upgrade considerations](https://docs.bitnami.com/kubernetes/infrastructure/mysql/administration/upgrade-helm3/).
+
+### To 3.0.0
+
+Backwards compatibility is not guaranteed unless you modify the labels used on the chart's deployments.
+Use the workaround below to upgrade from versions prior to 3.0.0. The following example assumes that the release name is mysql:
+
+```console
+$ kubectl delete statefulset mysql-master --cascade=false
+$ kubectl delete statefulset mysql-slave --cascade=false
+```
+
+## License
+
+Copyright © 2022 Bitnami
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. 
\ No newline at end of file diff --git a/charts/modules/mysql/charts/common/Chart.yaml b/charts/modules/mysql/charts/common/Chart.yaml new file mode 100644 index 0000000..6f0c3a6 --- /dev/null +++ b/charts/modules/mysql/charts/common/Chart.yaml @@ -0,0 +1,23 @@ +annotations: + category: Infrastructure +apiVersion: v2 +appVersion: 2.1.2 +description: A Library Helm Chart for grouping common logic between bitnami charts. + This chart is not deployable by itself. +home: https://github.com/bitnami/charts/tree/main/bitnami/common +icon: https://bitnami.com/downloads/logos/bitnami-mark.png +keywords: +- common +- helper +- template +- function +- bitnami +maintainers: +- name: Bitnami + url: https://github.com/bitnami/charts +name: common +sources: +- https://github.com/bitnami/charts +- https://www.bitnami.com/ +type: library +version: 2.1.2 diff --git a/charts/modules/mysql/charts/common/README.md b/charts/modules/mysql/charts/common/README.md new file mode 100644 index 0000000..a2ecd60 --- /dev/null +++ b/charts/modules/mysql/charts/common/README.md @@ -0,0 +1,350 @@ +# Bitnami Common Library Chart + +A [Helm Library Chart](https://helm.sh/docs/topics/library_charts/#helm) for grouping common logic between bitnami charts. + +## TL;DR + +```yaml +dependencies: + - name: common + version: 1.x.x + repository: https://charts.bitnami.com/bitnami +``` + +```bash +$ helm dependency update +``` + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "common.names.fullname" . }} +data: + myvalue: "Hello World" +``` + +## Introduction + +This chart provides a common template helpers which can be used to develop new charts using [Helm](https://helm.sh) package manager. + +Bitnami charts can be used with [Kubeapps](https://kubeapps.dev/) for deployment and management of Helm Charts in clusters. + +## Prerequisites + +- Kubernetes 1.19+ +- Helm 3.2.0+ + +## Parameters + +The following table lists the helpers available in the library which are scoped in different sections. + +### Affinities + +| Helper identifier | Description | Expected Input | +|-------------------------------|------------------------------------------------------|------------------------------------------------| +| `common.affinities.nodes.soft` | Return a soft nodeAffinity definition | `dict "key" "FOO" "values" (list "BAR" "BAZ")` | +| `common.affinities.nodes.hard` | Return a hard nodeAffinity definition | `dict "key" "FOO" "values" (list "BAR" "BAZ")` | +| `common.affinities.pods.soft` | Return a soft podAffinity/podAntiAffinity definition | `dict "component" "FOO" "context" $` | +| `common.affinities.pods.hard` | Return a hard podAffinity/podAntiAffinity definition | `dict "component" "FOO" "context" $` | + +### Capabilities + +| Helper identifier | Description | Expected Input | +|------------------------------------------------|------------------------------------------------------------------------------------------------|-------------------| +| `common.capabilities.kubeVersion` | Return the target Kubernetes version (using client default if .Values.kubeVersion is not set). | `.` Chart context | +| `common.capabilities.cronjob.apiVersion` | Return the appropriate apiVersion for cronjob. | `.` Chart context | +| `common.capabilities.deployment.apiVersion` | Return the appropriate apiVersion for deployment. | `.` Chart context | +| `common.capabilities.statefulset.apiVersion` | Return the appropriate apiVersion for statefulset. 
| `.` Chart context | +| `common.capabilities.ingress.apiVersion` | Return the appropriate apiVersion for ingress. | `.` Chart context | +| `common.capabilities.rbac.apiVersion` | Return the appropriate apiVersion for RBAC resources. | `.` Chart context | +| `common.capabilities.crd.apiVersion` | Return the appropriate apiVersion for CRDs. | `.` Chart context | +| `common.capabilities.policy.apiVersion` | Return the appropriate apiVersion for podsecuritypolicy. | `.` Chart context | +| `common.capabilities.networkPolicy.apiVersion` | Return the appropriate apiVersion for networkpolicy. | `.` Chart context | +| `common.capabilities.apiService.apiVersion` | Return the appropriate apiVersion for APIService. | `.` Chart context | +| `common.capabilities.hpa.apiVersion` | Return the appropriate apiVersion for Horizontal Pod Autoscaler | `.` Chart context | +| `common.capabilities.supportsHelmVersion` | Returns true if the used Helm version is 3.3+ | `.` Chart context | + +### Errors + +| Helper identifier | Description | Expected Input | +|-----------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------| +| `common.errors.upgrade.passwords.empty` | It will ensure required passwords are given when we are upgrading a chart. If `validationErrors` is not empty it will throw an error and will stop the upgrade action. | `dict "validationErrors" (list $validationError00 $validationError01) "context" $` | + +### Images + +| Helper identifier | Description | Expected Input | +|-----------------------------|------------------------------------------------------|---------------------------------------------------------------------------------------------------------| +| `common.images.image` | Return the proper and full image name | `dict "imageRoot" .Values.path.to.the.image "global" $`, see [ImageRoot](#imageroot) for the structure. 
| +| `common.images.pullSecrets` | Return the proper Docker Image Registry Secret Names (deprecated: use common.images.renderPullSecrets instead) | `dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "global" .Values.global` | +| `common.images.renderPullSecrets` | Return the proper Docker Image Registry Secret Names (evaluates values as templates) | `dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "context" $` | + +### Ingress + +| Helper identifier | Description | Expected Input | +|-------------------------------------------|-------------------------------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.ingress.backend` | Generate a proper Ingress backend entry depending on the API version | `dict "serviceName" "foo" "servicePort" "bar"`, see the [Ingress deprecation notice](https://kubernetes.io/blog/2019/07/18/api-deprecations-in-1-16/) for the syntax differences | +| `common.ingress.supportsPathType` | Prints "true" if the pathType field is supported | `.` Chart context | +| `common.ingress.supportsIngressClassname` | Prints "true" if the ingressClassname field is supported | `.` Chart context | +| `common.ingress.certManagerRequest` | Prints "true" if required cert-manager annotations for TLS signed certificates are set in the Ingress annotations | `dict "annotations" .Values.path.to.the.ingress.annotations` | + +### Labels + +| Helper identifier | Description | Expected Input | +|-----------------------------|-----------------------------------------------------------------------------|-------------------| +| `common.labels.standard` | Return Kubernetes standard labels | `.` Chart context | +| `common.labels.matchLabels` | Labels to use on `deploy.spec.selector.matchLabels` and `svc.spec.selector` | `.` Chart context | + +### Names + +| Helper identifier | Description | Expected Input | +|-----------------------------------|-----------------------------------------------------------------------|-------------------| +| `common.names.name` | Expand the name of the chart or use `.Values.nameOverride` | `.` Chart context | +| `common.names.fullname` | Create a default fully qualified app name. | `.` Chart context | +| `common.names.namespace` | Allow the release namespace to be overridden | `.` Chart context | +| `common.names.fullname.namespace` | Create a fully qualified app name adding the installation's namespace | `.` Chart context | +| `common.names.chart` | Chart name plus version | `.` Chart context | + +### Secrets + +| Helper identifier | Description | Expected Input | +|---------------------------|--------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.secrets.name` | Generate the name of the secret. | `dict "existingSecret" .Values.path.to.the.existingSecret "defaultNameSuffix" "mySuffix" "context" $` see [ExistingSecret](#existingsecret) for the structure. | +| `common.secrets.key` | Generate secret key. | `dict "existingSecret" .Values.path.to.the.existingSecret "key" "keyName"` see [ExistingSecret](#existingsecret) for the structure. 
| +| `common.passwords.manage` | Generate secret password or retrieve one if already created. | `dict "secret" "secret-name" "key" "keyName" "providedValues" (list "path.to.password1" "path.to.password2") "length" 10 "strong" false "chartName" "chartName" "context" $`, length, strong and chartNAme fields are optional. | +| `common.secrets.exists` | Returns whether a previous generated secret already exists. | `dict "secret" "secret-name" "context" $` | + +### Storage + +| Helper identifier | Description | Expected Input | +|-------------------------------|---------------------------------------|---------------------------------------------------------------------------------------------------------------------| +| `common.storage.class` | Return the proper Storage Class | `dict "persistence" .Values.path.to.the.persistence "global" $`, see [Persistence](#persistence) for the structure. | + +### TplValues + +| Helper identifier | Description | Expected Input | +|---------------------------|----------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.tplvalues.render` | Renders a value that contains template | `dict "value" .Values.path.to.the.Value "context" $`, value is the value should rendered as template, context frequently is the chart context `$` or `.` | + +### Utils + +| Helper identifier | Description | Expected Input | +|--------------------------------|------------------------------------------------------------------------------------------|------------------------------------------------------------------------| +| `common.utils.fieldToEnvVar` | Build environment variable name given a field. | `dict "field" "my-password"` | +| `common.utils.secret.getvalue` | Print instructions to get a secret value. | `dict "secret" "secret-name" "field" "secret-value-field" "context" $` | +| `common.utils.getValueFromKey` | Gets a value from `.Values` object given its key path | `dict "key" "path.to.key" "context" $` | +| `common.utils.getKeyFromList` | Returns first `.Values` key with a defined value or first of the list if all non-defined | `dict "keys" (list "path.to.key1" "path.to.key2") "context" $` | + +### Validations + +| Helper identifier | Description | Expected Input | +|--------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.validations.values.single.empty` | Validate a value must not be empty. | `dict "valueKey" "path.to.value" "secret" "secret.name" "field" "my-password" "subchart" "subchart" "context" $` secret, field and subchart are optional. In case they are given, the helper will generate a how to get instruction. See [ValidateValue](#validatevalue) | +| `common.validations.values.multiple.empty` | Validate a multiple values must not be empty. It returns a shared error for all the values. | `dict "required" (list $validateValueConf00 $validateValueConf01) "context" $`. See [ValidateValue](#validatevalue) | +| `common.validations.values.mariadb.passwords` | This helper will ensure required password for MariaDB are not empty. 
It returns a shared error for all the values. | `dict "secret" "mariadb-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use mariadb chart and the helper. | +| `common.validations.values.mysql.passwords` | This helper will ensure required password for MySQL are not empty. It returns a shared error for all the values. | `dict "secret" "mysql-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use mysql chart and the helper. | +| `common.validations.values.postgresql.passwords` | This helper will ensure required password for PostgreSQL are not empty. It returns a shared error for all the values. | `dict "secret" "postgresql-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use postgresql chart and the helper. | +| `common.validations.values.redis.passwords` | This helper will ensure required password for Redis® are not empty. It returns a shared error for all the values. | `dict "secret" "redis-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use redis chart and the helper. | +| `common.validations.values.cassandra.passwords` | This helper will ensure required password for Cassandra are not empty. It returns a shared error for all the values. | `dict "secret" "cassandra-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use cassandra chart and the helper. | +| `common.validations.values.mongodb.passwords` | This helper will ensure required password for MongoDB® are not empty. It returns a shared error for all the values. | `dict "secret" "mongodb-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use mongodb chart and the helper. | + +### Warnings + +| Helper identifier | Description | Expected Input | +|------------------------------|----------------------------------|------------------------------------------------------------| +| `common.warnings.rollingTag` | Warning about using rolling tag. | `ImageRoot` see [ImageRoot](#imageroot) for the structure. | + +## Special input schemas + +### ImageRoot + +```yaml +registry: + type: string + description: Docker registry where the image is located + example: docker.io + +repository: + type: string + description: Repository and image name + example: bitnami/nginx + +tag: + type: string + description: image tag + example: 1.16.1-debian-10-r63 + +pullPolicy: + type: string + description: Specify a imagePullPolicy. Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + +pullSecrets: + type: array + items: + type: string + description: Optionally specify an array of imagePullSecrets (evaluated as templates). + +debug: + type: boolean + description: Set to true if you would like to see extra information on logs + example: false + +## An instance would be: +# registry: docker.io +# repository: bitnami/nginx +# tag: 1.16.1-debian-10-r63 +# pullPolicy: IfNotPresent +# debug: false +``` + +### Persistence + +```yaml +enabled: + type: boolean + description: Whether enable persistence. + example: true + +storageClass: + type: string + description: Ghost data Persistent Volume Storage Class, If set to "-", storageClassName: "" which disables dynamic provisioning. 
+  example: "-"
+
+accessMode:
+  type: string
+  description: Access mode for the Persistent Volume Storage.
+  example: ReadWriteOnce
+
+size:
+  type: string
+  description: Size of the Persistent Volume Storage.
+  example: 8Gi
+
+path:
+  type: string
+  description: Path to be persisted.
+  example: /bitnami
+
+## An instance would be:
+# enabled: true
+# storageClass: "-"
+# accessMode: ReadWriteOnce
+# size: 8Gi
+# path: /bitnami
+```
+
+### ExistingSecret
+
+```yaml
+name:
+  type: string
+  description: Name of the existing secret.
+  example: mySecret
+keyMapping:
+  description: Mapping between the expected key name and the name of the key in the existing secret.
+  type: object
+
+## An instance would be:
+# name: mySecret
+# keyMapping:
+#   password: myPasswordKey
+```
+
+#### Example of use
+
+When we store sensitive data for a deployment in a secret, sometimes we want to give users the possibility of using their existing secrets.
+
+```yaml
+# templates/secret.yaml
+---
+apiVersion: v1
+kind: Secret
+metadata:
+  name: {{ include "common.names.fullname" . }}
+  labels:
+    app: {{ include "common.names.fullname" . }}
+type: Opaque
+data:
+  password: {{ .Values.password | b64enc | quote }}
+
+# templates/dpl.yaml
+---
+...
+      env:
+        - name: PASSWORD
+          valueFrom:
+            secretKeyRef:
+              name: {{ include "common.secrets.name" (dict "existingSecret" .Values.existingSecret "context" $) }}
+              key: {{ include "common.secrets.key" (dict "existingSecret" .Values.existingSecret "key" "password") }}
+...
+
+# values.yaml
+---
+name: mySecret
+keyMapping:
+  password: myPasswordKey
+```
+
+### ValidateValue
+
+#### NOTES.txt
+
+```console
+{{- $validateValueConf00 := (dict "valueKey" "path.to.value00" "secret" "secretName" "field" "password-00") -}}
+{{- $validateValueConf01 := (dict "valueKey" "path.to.value01" "secret" "secretName" "field" "password-01") -}}
+
+{{ include "common.validations.values.multiple.empty" (dict "required" (list $validateValueConf00 $validateValueConf01) "context" $) }}
+```
+
+If we force those values to be empty, we will see some alerts:
+
+```console
+$ helm install test mychart --set path.to.value00="",path.to.value01=""
+    'path.to.value00' must not be empty, please add '--set path.to.value00=$PASSWORD_00' to the command. To get the current value:
+
+        export PASSWORD_00=$(kubectl get secret --namespace default secretName -o jsonpath="{.data.password-00}" | base64 -d)
+
+    'path.to.value01' must not be empty, please add '--set path.to.value01=$PASSWORD_01' to the command. To get the current value:
+
+        export PASSWORD_01=$(kubectl get secret --namespace default secretName -o jsonpath="{.data.password-01}" | base64 -d)
+```
+
+## Upgrading
+
+### To 1.0.0
+
+[On November 13, 2020, Helm v2 support formally ended](https://github.com/helm/charts#status-of-the-project). This major version is the result of the required changes applied to the Helm Chart to be able to incorporate the different features added in Helm v3 and to be consistent with the Helm project itself regarding the Helm v2 EOL.
+
+**What changes were introduced in this major version?**
+
+- Previous versions of this Helm Chart used `apiVersion: v1` (installable by both Helm 2 and 3); this Helm Chart was updated to `apiVersion: v2` (installable by Helm 3 only). [Here](https://helm.sh/docs/topics/charts/#the-apiversion-field) you can find more information about the `apiVersion` field.
+- Use `type: library`. [Here](https://v3.helm.sh/docs/faq/#library-chart-support) you can find more information. 
+- The different fields present in the *Chart.yaml* file has been ordered alphabetically in a homogeneous way for all the Bitnami Helm Charts + +**Considerations when upgrading to this version** + +- If you want to upgrade to this version from a previous one installed with Helm v3, you shouldn't face any issues +- If you want to upgrade to this version using Helm v2, this scenario is not supported as this version doesn't support Helm v2 anymore +- If you installed the previous version with Helm v2 and wants to upgrade to this version with Helm v3, please refer to the [official Helm documentation](https://helm.sh/docs/topics/v2_v3_migration/#migration-use-cases) about migrating from Helm v2 to v3 + +**Useful links** + +- https://docs.bitnami.com/tutorials/resolve-helm2-helm3-post-migration-issues/ +- https://helm.sh/docs/topics/v2_v3_migration/ +- https://helm.sh/blog/migrate-from-helm-v2-to-helm-v3/ + +## License + +Copyright © 2022 Bitnami + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/charts/modules/mysql/charts/common/templates/_affinities.tpl b/charts/modules/mysql/charts/common/templates/_affinities.tpl new file mode 100644 index 0000000..497068f --- /dev/null +++ b/charts/modules/mysql/charts/common/templates/_affinities.tpl @@ -0,0 +1,98 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Return a soft nodeAffinity definition +{{ include "common.affinities.nodes.soft" (dict "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.nodes.soft" -}} +preferredDuringSchedulingIgnoredDuringExecution: + - preference: + matchExpressions: + - key: {{ .key }} + operator: In + values: + {{- range .values }} + - {{ . | quote }} + {{- end }} + weight: 1 +{{- end -}} + +{{/* +Return a hard nodeAffinity definition +{{ include "common.affinities.nodes.hard" (dict "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.nodes.hard" -}} +requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: {{ .key }} + operator: In + values: + {{- range .values }} + - {{ . | quote }} + {{- end }} +{{- end -}} + +{{/* +Return a nodeAffinity definition +{{ include "common.affinities.nodes" (dict "type" "soft" "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.nodes" -}} + {{- if eq .type "soft" }} + {{- include "common.affinities.nodes.soft" . -}} + {{- else if eq .type "hard" }} + {{- include "common.affinities.nodes.hard" . 
-}} + {{- end -}} +{{- end -}} + +{{/* +Return a soft podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods.soft" (dict "component" "FOO" "extraMatchLabels" .Values.extraMatchLabels "context" $) -}} +*/}} +{{- define "common.affinities.pods.soft" -}} +{{- $component := default "" .component -}} +{{- $extraMatchLabels := default (dict) .extraMatchLabels -}} +preferredDuringSchedulingIgnoredDuringExecution: + - podAffinityTerm: + labelSelector: + matchLabels: {{- (include "common.labels.matchLabels" .context) | nindent 10 }} + {{- if not (empty $component) }} + {{ printf "app.kubernetes.io/component: %s" $component }} + {{- end }} + {{- range $key, $value := $extraMatchLabels }} + {{ $key }}: {{ $value | quote }} + {{- end }} + topologyKey: kubernetes.io/hostname + weight: 1 +{{- end -}} + +{{/* +Return a hard podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods.hard" (dict "component" "FOO" "extraMatchLabels" .Values.extraMatchLabels "context" $) -}} +*/}} +{{- define "common.affinities.pods.hard" -}} +{{- $component := default "" .component -}} +{{- $extraMatchLabels := default (dict) .extraMatchLabels -}} +requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: {{- (include "common.labels.matchLabels" .context) | nindent 8 }} + {{- if not (empty $component) }} + {{ printf "app.kubernetes.io/component: %s" $component }} + {{- end }} + {{- range $key, $value := $extraMatchLabels }} + {{ $key }}: {{ $value | quote }} + {{- end }} + topologyKey: kubernetes.io/hostname +{{- end -}} + +{{/* +Return a podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods" (dict "type" "soft" "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.pods" -}} + {{- if eq .type "soft" }} + {{- include "common.affinities.pods.soft" . -}} + {{- else if eq .type "hard" }} + {{- include "common.affinities.pods.hard" . -}} + {{- end -}} +{{- end -}} diff --git a/charts/modules/mysql/charts/common/templates/_capabilities.tpl b/charts/modules/mysql/charts/common/templates/_capabilities.tpl new file mode 100644 index 0000000..9d9b760 --- /dev/null +++ b/charts/modules/mysql/charts/common/templates/_capabilities.tpl @@ -0,0 +1,154 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Return the target Kubernetes version +*/}} +{{- define "common.capabilities.kubeVersion" -}} +{{- if .Values.global }} + {{- if .Values.global.kubeVersion }} + {{- .Values.global.kubeVersion -}} + {{- else }} + {{- default .Capabilities.KubeVersion.Version .Values.kubeVersion -}} + {{- end -}} +{{- else }} +{{- default .Capabilities.KubeVersion.Version .Values.kubeVersion -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for poddisruptionbudget. +*/}} +{{- define "common.capabilities.policy.apiVersion" -}} +{{- if semverCompare "<1.21-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "policy/v1beta1" -}} +{{- else -}} +{{- print "policy/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for networkpolicy. +*/}} +{{- define "common.capabilities.networkPolicy.apiVersion" -}} +{{- if semverCompare "<1.7-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for cronjob. +*/}} +{{- define "common.capabilities.cronjob.apiVersion" -}} +{{- if semverCompare "<1.21-0" (include "common.capabilities.kubeVersion" .) 
-}} +{{- print "batch/v1beta1" -}} +{{- else -}} +{{- print "batch/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for deployment. +*/}} +{{- define "common.capabilities.deployment.apiVersion" -}} +{{- if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for statefulset. +*/}} +{{- define "common.capabilities.statefulset.apiVersion" -}} +{{- if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "apps/v1beta1" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for ingress. +*/}} +{{- define "common.capabilities.ingress.apiVersion" -}} +{{- if .Values.ingress -}} +{{- if .Values.ingress.apiVersion -}} +{{- .Values.ingress.apiVersion -}} +{{- else if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "networking.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end }} +{{- else if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "networking.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for RBAC resources. +*/}} +{{- define "common.capabilities.rbac.apiVersion" -}} +{{- if semverCompare "<1.17-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "rbac.authorization.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "rbac.authorization.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for CRDs. +*/}} +{{- define "common.capabilities.crd.apiVersion" -}} +{{- if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "apiextensions.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "apiextensions.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for APIService. +*/}} +{{- define "common.capabilities.apiService.apiVersion" -}} +{{- if semverCompare "<1.10-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "apiregistration.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "apiregistration.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for Horizontal Pod Autoscaler. +*/}} +{{- define "common.capabilities.hpa.apiVersion" -}} +{{- if semverCompare "<1.23-0" (include "common.capabilities.kubeVersion" .context) -}} +{{- if .beta2 -}} +{{- print "autoscaling/v2beta2" -}} +{{- else -}} +{{- print "autoscaling/v2beta1" -}} +{{- end -}} +{{- else -}} +{{- print "autoscaling/v2" -}} +{{- end -}} +{{- end -}} + +{{/* +Returns true if the used Helm version is 3.3+. +A way to check the used Helm version was not introduced until version 3.3.0 with .Capabilities.HelmVersion, which contains an additional "{}}" structure. +This check is introduced as a regexMatch instead of {{ if .Capabilities.HelmVersion }} because checking for the key HelmVersion in <3.3 results in a "interface not found" error. 
+**To be removed when the catalog's minimun Helm version is 3.3** +*/}} +{{- define "common.capabilities.supportsHelmVersion" -}} +{{- if regexMatch "{(v[0-9])*[^}]*}}$" (.Capabilities | toString ) }} + {{- true -}} +{{- end -}} +{{- end -}} diff --git a/charts/modules/mysql/charts/common/templates/_errors.tpl b/charts/modules/mysql/charts/common/templates/_errors.tpl new file mode 100644 index 0000000..a79cc2e --- /dev/null +++ b/charts/modules/mysql/charts/common/templates/_errors.tpl @@ -0,0 +1,23 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Through error when upgrading using empty passwords values that must not be empty. + +Usage: +{{- $validationError00 := include "common.validations.values.single.empty" (dict "valueKey" "path.to.password00" "secret" "secretName" "field" "password-00") -}} +{{- $validationError01 := include "common.validations.values.single.empty" (dict "valueKey" "path.to.password01" "secret" "secretName" "field" "password-01") -}} +{{ include "common.errors.upgrade.passwords.empty" (dict "validationErrors" (list $validationError00 $validationError01) "context" $) }} + +Required password params: + - validationErrors - String - Required. List of validation strings to be return, if it is empty it won't throw error. + - context - Context - Required. Parent context. +*/}} +{{- define "common.errors.upgrade.passwords.empty" -}} + {{- $validationErrors := join "" .validationErrors -}} + {{- if and $validationErrors .context.Release.IsUpgrade -}} + {{- $errorString := "\nPASSWORDS ERROR: You must provide your current passwords when upgrading the release." -}} + {{- $errorString = print $errorString "\n Note that even after reinstallation, old credentials may be needed as they may be kept in persistent volume claims." -}} + {{- $errorString = print $errorString "\n Further information can be obtained at https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues/#credential-errors-while-upgrading-chart-releases" -}} + {{- $errorString = print $errorString "\n%s" -}} + {{- printf $errorString $validationErrors | fail -}} + {{- end -}} +{{- end -}} diff --git a/charts/modules/mysql/charts/common/templates/_images.tpl b/charts/modules/mysql/charts/common/templates/_images.tpl new file mode 100644 index 0000000..46c659e --- /dev/null +++ b/charts/modules/mysql/charts/common/templates/_images.tpl @@ -0,0 +1,76 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Return the proper image name +{{ include "common.images.image" ( dict "imageRoot" .Values.path.to.the.image "global" $) }} +*/}} +{{- define "common.images.image" -}} +{{- $registryName := .imageRoot.registry -}} +{{- $repositoryName := .imageRoot.repository -}} +{{- $separator := ":" -}} +{{- $termination := .imageRoot.tag | toString -}} +{{- if .global }} + {{- if .global.imageRegistry }} + {{- $registryName = .global.imageRegistry -}} + {{- end -}} +{{- end -}} +{{- if .imageRoot.digest }} + {{- $separator = "@" -}} + {{- $termination = .imageRoot.digest | toString -}} +{{- end -}} +{{- printf "%s/%s%s%s" $registryName $repositoryName $separator $termination -}} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names (deprecated: use common.images.renderPullSecrets instead) +{{ include "common.images.pullSecrets" ( dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "global" .Values.global) }} +*/}} +{{- define "common.images.pullSecrets" -}} + {{- $pullSecrets := list }} + + {{- if .global }} + {{- range .global.imagePullSecrets -}} + {{- $pullSecrets = append 
$pullSecrets . -}} + {{- end -}} + {{- end -}} + + {{- range .images -}} + {{- range .pullSecrets -}} + {{- $pullSecrets = append $pullSecrets . -}} + {{- end -}} + {{- end -}} + + {{- if (not (empty $pullSecrets)) }} +imagePullSecrets: + {{- range $pullSecrets }} + - name: {{ . }} + {{- end }} + {{- end }} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names evaluating values as templates +{{ include "common.images.renderPullSecrets" ( dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "context" $) }} +*/}} +{{- define "common.images.renderPullSecrets" -}} + {{- $pullSecrets := list }} + {{- $context := .context }} + + {{- if $context.Values.global }} + {{- range $context.Values.global.imagePullSecrets -}} + {{- $pullSecrets = append $pullSecrets (include "common.tplvalues.render" (dict "value" . "context" $context)) -}} + {{- end -}} + {{- end -}} + + {{- range .images -}} + {{- range .pullSecrets -}} + {{- $pullSecrets = append $pullSecrets (include "common.tplvalues.render" (dict "value" . "context" $context)) -}} + {{- end -}} + {{- end -}} + + {{- if (not (empty $pullSecrets)) }} +imagePullSecrets: + {{- range $pullSecrets }} + - name: {{ . }} + {{- end }} + {{- end }} +{{- end -}} diff --git a/charts/modules/mysql/charts/common/templates/_ingress.tpl b/charts/modules/mysql/charts/common/templates/_ingress.tpl new file mode 100644 index 0000000..831da9c --- /dev/null +++ b/charts/modules/mysql/charts/common/templates/_ingress.tpl @@ -0,0 +1,68 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Generate backend entry that is compatible with all Kubernetes API versions. + +Usage: +{{ include "common.ingress.backend" (dict "serviceName" "backendName" "servicePort" "backendPort" "context" $) }} + +Params: + - serviceName - String. Name of an existing service backend + - servicePort - String/Int. Port name (or number) of the service. It will be translated to different yaml depending if it is a string or an integer. + - context - Dict - Required. The context for the template evaluation. +*/}} +{{- define "common.ingress.backend" -}} +{{- $apiVersion := (include "common.capabilities.ingress.apiVersion" .context) -}} +{{- if or (eq $apiVersion "extensions/v1beta1") (eq $apiVersion "networking.k8s.io/v1beta1") -}} +serviceName: {{ .serviceName }} +servicePort: {{ .servicePort }} +{{- else -}} +service: + name: {{ .serviceName }} + port: + {{- if typeIs "string" .servicePort }} + name: {{ .servicePort }} + {{- else if or (typeIs "int" .servicePort) (typeIs "float64" .servicePort) }} + number: {{ .servicePort | int }} + {{- end }} +{{- end -}} +{{- end -}} + +{{/* +Print "true" if the API pathType field is supported +Usage: +{{ include "common.ingress.supportsPathType" . }} +*/}} +{{- define "common.ingress.supportsPathType" -}} +{{- if (semverCompare "<1.18-0" (include "common.capabilities.kubeVersion" .)) -}} +{{- print "false" -}} +{{- else -}} +{{- print "true" -}} +{{- end -}} +{{- end -}} + +{{/* +Returns true if the ingressClassname field is supported +Usage: +{{ include "common.ingress.supportsIngressClassname" . }} +*/}} +{{- define "common.ingress.supportsIngressClassname" -}} +{{- if semverCompare "<1.18-0" (include "common.capabilities.kubeVersion" .) 
-}} +{{- print "false" -}} +{{- else -}} +{{- print "true" -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if cert-manager required annotations for TLS signed +certificates are set in the Ingress annotations +Ref: https://cert-manager.io/docs/usage/ingress/#supported-annotations +Usage: +{{ include "common.ingress.certManagerRequest" ( dict "annotations" .Values.path.to.the.ingress.annotations ) }} +*/}} +{{- define "common.ingress.certManagerRequest" -}} +{{ if or (hasKey .annotations "cert-manager.io/cluster-issuer") (hasKey .annotations "cert-manager.io/issuer") (hasKey .annotations "kubernetes.io/tls-acme") }} + {{- true -}} +{{- end -}} +{{- end -}} diff --git a/charts/modules/mysql/charts/common/templates/_labels.tpl b/charts/modules/mysql/charts/common/templates/_labels.tpl new file mode 100644 index 0000000..252066c --- /dev/null +++ b/charts/modules/mysql/charts/common/templates/_labels.tpl @@ -0,0 +1,18 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Kubernetes standard labels +*/}} +{{- define "common.labels.standard" -}} +app.kubernetes.io/name: {{ include "common.names.name" . }} +helm.sh/chart: {{ include "common.names.chart" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end -}} + +{{/* +Labels to use on deploy.spec.selector.matchLabels and svc.spec.selector +*/}} +{{- define "common.labels.matchLabels" -}} +app.kubernetes.io/name: {{ include "common.names.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end -}} diff --git a/charts/modules/mysql/charts/common/templates/_names.tpl b/charts/modules/mysql/charts/common/templates/_names.tpl new file mode 100644 index 0000000..1bdac8b --- /dev/null +++ b/charts/modules/mysql/charts/common/templates/_names.tpl @@ -0,0 +1,70 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "common.names.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "common.names.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "common.names.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create a default fully qualified dependency name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. 
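+For example (illustrative values, not chart defaults): with release name "my-release" and chartName
+"mariadb" and no overrides this renders "my-release-mariadb", while a release named "mariadb-prod"
+renders just "mariadb-prod" because the release name already contains the chart name.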
+Usage: +{{ include "common.names.dependency.fullname" (dict "chartName" "dependency-chart-name" "chartValues" .Values.dependency-chart "context" $) }} +*/}} +{{- define "common.names.dependency.fullname" -}} +{{- if .chartValues.fullnameOverride -}} +{{- .chartValues.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .chartName .chartValues.nameOverride -}} +{{- if contains $name .context.Release.Name -}} +{{- .context.Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .context.Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Allow the release namespace to be overridden for multi-namespace deployments in combined charts. +*/}} +{{- define "common.names.namespace" -}} +{{- if .Values.namespaceOverride -}} +{{- .Values.namespaceOverride -}} +{{- else -}} +{{- .Release.Namespace -}} +{{- end -}} +{{- end -}} + +{{/* +Create a fully qualified app name adding the installation's namespace. +*/}} +{{- define "common.names.fullname.namespace" -}} +{{- printf "%s-%s" (include "common.names.fullname" .) (include "common.names.namespace" .) | trunc 63 | trimSuffix "-" -}} +{{- end -}} diff --git a/charts/modules/mysql/charts/common/templates/_secrets.tpl b/charts/modules/mysql/charts/common/templates/_secrets.tpl new file mode 100644 index 0000000..4267d42 --- /dev/null +++ b/charts/modules/mysql/charts/common/templates/_secrets.tpl @@ -0,0 +1,165 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Generate secret name. + +Usage: +{{ include "common.secrets.name" (dict "existingSecret" .Values.path.to.the.existingSecret "defaultNameSuffix" "mySuffix" "context" $) }} + +Params: + - existingSecret - ExistingSecret/String - Optional. The path to the existing secrets in the values.yaml given by the user + to be used instead of the default one. Allows for it to be of type String (just the secret name) for backwards compatibility. + +info: https://github.com/bitnami/charts/tree/main/bitnami/common#existingsecret + - defaultNameSuffix - String - Optional. It is used only if we have several secrets in the same deployment. + - context - Dict - Required. The context for the template evaluation. +*/}} +{{- define "common.secrets.name" -}} +{{- $name := (include "common.names.fullname" .context) -}} + +{{- if .defaultNameSuffix -}} +{{- $name = printf "%s-%s" $name .defaultNameSuffix | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- with .existingSecret -}} +{{- if not (typeIs "string" .) -}} +{{- with .name -}} +{{- $name = . -}} +{{- end -}} +{{- else -}} +{{- $name = . -}} +{{- end -}} +{{- end -}} + +{{- printf "%s" $name -}} +{{- end -}} + +{{/* +Generate secret key. + +Usage: +{{ include "common.secrets.key" (dict "existingSecret" .Values.path.to.the.existingSecret "key" "keyName") }} + +Params: + - existingSecret - ExistingSecret/String - Optional. The path to the existing secrets in the values.yaml given by the user + to be used instead of the default one. Allows for it to be of type String (just the secret name) for backwards compatibility. + +info: https://github.com/bitnami/charts/tree/main/bitnami/common#existingsecret + - key - String - Required. Name of the key in the secret. 
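+
+Example (illustrative values, not part of any chart defaults): given
+
+  existingSecret:
+    name: my-secret
+    keyMapping:
+      admin-password: my-admin-password-key
+
+  {{ include "common.secrets.key" (dict "existingSecret" .Values.auth.existingSecret "key" "admin-password") }}
+
+renders "my-admin-password-key"; with a plain string existingSecret, or no keyMapping, it falls back to "admin-password".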
+*/}} +{{- define "common.secrets.key" -}} +{{- $key := .key -}} + +{{- if .existingSecret -}} + {{- if not (typeIs "string" .existingSecret) -}} + {{- if .existingSecret.keyMapping -}} + {{- $key = index .existingSecret.keyMapping $.key -}} + {{- end -}} + {{- end }} +{{- end -}} + +{{- printf "%s" $key -}} +{{- end -}} + +{{/* +Generate secret password or retrieve one if already created. + +Usage: +{{ include "common.secrets.passwords.manage" (dict "secret" "secret-name" "key" "keyName" "providedValues" (list "path.to.password1" "path.to.password2") "length" 10 "strong" false "chartName" "chartName" "context" $) }} + +Params: + - secret - String - Required - Name of the 'Secret' resource where the password is stored. + - key - String - Required - Name of the key in the secret. + - providedValues - List - Required - The path to the validating value in the values.yaml, e.g: "mysql.password". Will pick first parameter with a defined value. + - length - int - Optional - Length of the generated random password. + - strong - Boolean - Optional - Whether to add symbols to the generated random password. + - chartName - String - Optional - Name of the chart used when said chart is deployed as a subchart. + - context - Context - Required - Parent context. + +The order in which this function returns a secret password: + 1. Already existing 'Secret' resource + (If a 'Secret' resource is found under the name provided to the 'secret' parameter to this function and that 'Secret' resource contains a key with the name passed as the 'key' parameter to this function then the value of this existing secret password will be returned) + 2. Password provided via the values.yaml + (If one of the keys passed to the 'providedValues' parameter to this function is a valid path to a key in the values.yaml and has a value, the value of the first key with a value will be returned) + 3. 
Randomly generated secret password + (A new random secret password with the length specified in the 'length' parameter will be generated and returned) + +*/}} +{{- define "common.secrets.passwords.manage" -}} + +{{- $password := "" }} +{{- $subchart := "" }} +{{- $chartName := default "" .chartName }} +{{- $passwordLength := default 10 .length }} +{{- $providedPasswordKey := include "common.utils.getKeyFromList" (dict "keys" .providedValues "context" $.context) }} +{{- $providedPasswordValue := include "common.utils.getValueFromKey" (dict "key" $providedPasswordKey "context" $.context) }} +{{- $secretData := (lookup "v1" "Secret" $.context.Release.Namespace .secret).data }} +{{- if $secretData }} + {{- if hasKey $secretData .key }} + {{- $password = index $secretData .key | quote }} + {{- else }} + {{- printf "\nPASSWORDS ERROR: The secret \"%s\" does not contain the key \"%s\"\n" .secret .key | fail -}} + {{- end -}} +{{- else if $providedPasswordValue }} + {{- $password = $providedPasswordValue | toString | b64enc | quote }} +{{- else }} + + {{- if .context.Values.enabled }} + {{- $subchart = $chartName }} + {{- end -}} + + {{- $requiredPassword := dict "valueKey" $providedPasswordKey "secret" .secret "field" .key "subchart" $subchart "context" $.context -}} + {{- $requiredPasswordError := include "common.validations.values.single.empty" $requiredPassword -}} + {{- $passwordValidationErrors := list $requiredPasswordError -}} + {{- include "common.errors.upgrade.passwords.empty" (dict "validationErrors" $passwordValidationErrors "context" $.context) -}} + + {{- if .strong }} + {{- $subStr := list (lower (randAlpha 1)) (randNumeric 1) (upper (randAlpha 1)) | join "_" }} + {{- $password = randAscii $passwordLength }} + {{- $password = regexReplaceAllLiteral "\\W" $password "@" | substr 5 $passwordLength }} + {{- $password = printf "%s%s" $subStr $password | toString | shuffle | b64enc | quote }} + {{- else }} + {{- $password = randAlphaNum $passwordLength | b64enc | quote }} + {{- end }} +{{- end -}} +{{- printf "%s" $password -}} +{{- end -}} + +{{/* +Reuses the value from an existing secret, otherwise sets its value to a default value. + +Usage: +{{ include "common.secrets.lookup" (dict "secret" "secret-name" "key" "keyName" "defaultValue" .Values.myValue "context" $) }} + +Params: + - secret - String - Required - Name of the 'Secret' resource where the password is stored. + - key - String - Required - Name of the key in the secret. + - defaultValue - String - Required - The path to the validating value in the values.yaml, e.g: "mysql.password". Will pick first parameter with a defined value. + - context - Context - Required - Parent context. + +*/}} +{{- define "common.secrets.lookup" -}} +{{- $value := "" -}} +{{- $defaultValue := required "\n'common.secrets.lookup': Argument 'defaultValue' missing or empty" .defaultValue -}} +{{- $secretData := (lookup "v1" "Secret" $.context.Release.Namespace .secret).data -}} +{{- if and $secretData (hasKey $secretData .key) -}} + {{- $value = index $secretData .key -}} +{{- else -}} + {{- $value = $defaultValue | toString | b64enc -}} +{{- end -}} +{{- printf "%s" $value -}} +{{- end -}} + +{{/* +Returns whether a previous generated secret already exists + +Usage: +{{ include "common.secrets.exists" (dict "secret" "secret-name" "context" $) }} + +Params: + - secret - String - Required - Name of the 'Secret' resource where the password is stored. + - context - Context - Required - Parent context. 
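+
+Note (applies to the other lookup-based helpers in this file as well): the Helm "lookup" function returns
+an empty result during "helm template" and "--dry-run", so this helper only detects an existing Secret
+when run against a live cluster at install/upgrade time.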
+*/}} +{{- define "common.secrets.exists" -}} +{{- $secret := (lookup "v1" "Secret" $.context.Release.Namespace .secret) }} +{{- if $secret }} + {{- true -}} +{{- end -}} +{{- end -}} diff --git a/charts/modules/mysql/charts/common/templates/_storage.tpl b/charts/modules/mysql/charts/common/templates/_storage.tpl new file mode 100644 index 0000000..60e2a84 --- /dev/null +++ b/charts/modules/mysql/charts/common/templates/_storage.tpl @@ -0,0 +1,23 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Return the proper Storage Class +{{ include "common.storage.class" ( dict "persistence" .Values.path.to.the.persistence "global" $) }} +*/}} +{{- define "common.storage.class" -}} + +{{- $storageClass := .persistence.storageClass -}} +{{- if .global -}} + {{- if .global.storageClass -}} + {{- $storageClass = .global.storageClass -}} + {{- end -}} +{{- end -}} + +{{- if $storageClass -}} + {{- if (eq "-" $storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" $storageClass -}} + {{- end -}} +{{- end -}} + +{{- end -}} diff --git a/charts/modules/mysql/charts/common/templates/_tplvalues.tpl b/charts/modules/mysql/charts/common/templates/_tplvalues.tpl new file mode 100644 index 0000000..2db1668 --- /dev/null +++ b/charts/modules/mysql/charts/common/templates/_tplvalues.tpl @@ -0,0 +1,13 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Renders a value that contains template. +Usage: +{{ include "common.tplvalues.render" ( dict "value" .Values.path.to.the.Value "context" $) }} +*/}} +{{- define "common.tplvalues.render" -}} + {{- if typeIs "string" .value }} + {{- tpl .value .context }} + {{- else }} + {{- tpl (.value | toYaml) .context }} + {{- end }} +{{- end -}} diff --git a/charts/modules/mysql/charts/common/templates/_utils.tpl b/charts/modules/mysql/charts/common/templates/_utils.tpl new file mode 100644 index 0000000..8c22b2a --- /dev/null +++ b/charts/modules/mysql/charts/common/templates/_utils.tpl @@ -0,0 +1,62 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Print instructions to get a secret value. +Usage: +{{ include "common.utils.secret.getvalue" (dict "secret" "secret-name" "field" "secret-value-field" "context" $) }} +*/}} +{{- define "common.utils.secret.getvalue" -}} +{{- $varname := include "common.utils.fieldToEnvVar" . -}} +export {{ $varname }}=$(kubectl get secret --namespace {{ .context.Release.Namespace | quote }} {{ .secret }} -o jsonpath="{.data.{{ .field }}}" | base64 -d) +{{- end -}} + +{{/* +Build env var name given a field +Usage: +{{ include "common.utils.fieldToEnvVar" dict "field" "my-password" }} +*/}} +{{- define "common.utils.fieldToEnvVar" -}} + {{- $fieldNameSplit := splitList "-" .field -}} + {{- $upperCaseFieldNameSplit := list -}} + + {{- range $fieldNameSplit -}} + {{- $upperCaseFieldNameSplit = append $upperCaseFieldNameSplit ( upper . ) -}} + {{- end -}} + + {{ join "_" $upperCaseFieldNameSplit }} +{{- end -}} + +{{/* +Gets a value from .Values given +Usage: +{{ include "common.utils.getValueFromKey" (dict "key" "path.to.key" "context" $) }} +*/}} +{{- define "common.utils.getValueFromKey" -}} +{{- $splitKey := splitList "." .key -}} +{{- $value := "" -}} +{{- $latestObj := $.context.Values -}} +{{- range $splitKey -}} + {{- if not $latestObj -}} + {{- printf "please review the entire path of '%s' exists in values" $.key | fail -}} + {{- end -}} + {{- $value = ( index $latestObj . 
) -}} + {{- $latestObj = $value -}} +{{- end -}} +{{- printf "%v" (default "" $value) -}} +{{- end -}} + +{{/* +Returns first .Values key with a defined value or first of the list if all non-defined +Usage: +{{ include "common.utils.getKeyFromList" (dict "keys" (list "path.to.key1" "path.to.key2") "context" $) }} +*/}} +{{- define "common.utils.getKeyFromList" -}} +{{- $key := first .keys -}} +{{- $reverseKeys := reverse .keys }} +{{- range $reverseKeys }} + {{- $value := include "common.utils.getValueFromKey" (dict "key" . "context" $.context ) }} + {{- if $value -}} + {{- $key = . }} + {{- end -}} +{{- end -}} +{{- printf "%s" $key -}} +{{- end -}} diff --git a/charts/modules/mysql/charts/common/templates/_warnings.tpl b/charts/modules/mysql/charts/common/templates/_warnings.tpl new file mode 100644 index 0000000..ae10fa4 --- /dev/null +++ b/charts/modules/mysql/charts/common/templates/_warnings.tpl @@ -0,0 +1,14 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Warning about using rolling tag. +Usage: +{{ include "common.warnings.rollingTag" .Values.path.to.the.imageRoot }} +*/}} +{{- define "common.warnings.rollingTag" -}} + +{{- if and (contains "bitnami/" .repository) (not (.tag | toString | regexFind "-r\\d+$|sha256:")) }} +WARNING: Rolling tag detected ({{ .repository }}:{{ .tag }}), please note that it is strongly recommended to avoid using rolling tags in a production environment. ++info https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/ +{{- end }} + +{{- end -}} diff --git a/charts/modules/mysql/charts/common/templates/validations/_cassandra.tpl b/charts/modules/mysql/charts/common/templates/validations/_cassandra.tpl new file mode 100644 index 0000000..ded1ae3 --- /dev/null +++ b/charts/modules/mysql/charts/common/templates/validations/_cassandra.tpl @@ -0,0 +1,72 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate Cassandra required passwords are not empty. + +Usage: +{{ include "common.validations.values.cassandra.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where Cassandra values are stored, e.g: "cassandra-passwords-secret" + - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.cassandra.passwords" -}} + {{- $existingSecret := include "common.cassandra.values.existingSecret" . -}} + {{- $enabled := include "common.cassandra.values.enabled" . -}} + {{- $dbUserPrefix := include "common.cassandra.values.key.dbUser" . -}} + {{- $valueKeyPassword := printf "%s.password" $dbUserPrefix -}} + + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "cassandra-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.cassandra.values.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. 
Default: false +*/}} +{{- define "common.cassandra.values.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.cassandra.dbUser.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.dbUser.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled cassandra. + +Usage: +{{ include "common.cassandra.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.cassandra.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.cassandra.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key dbUser + +Usage: +{{ include "common.cassandra.values.key.dbUser" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false +*/}} +{{- define "common.cassandra.values.key.dbUser" -}} + {{- if .subchart -}} + cassandra.dbUser + {{- else -}} + dbUser + {{- end -}} +{{- end -}} diff --git a/charts/modules/mysql/charts/common/templates/validations/_mariadb.tpl b/charts/modules/mysql/charts/common/templates/validations/_mariadb.tpl new file mode 100644 index 0000000..b6906ff --- /dev/null +++ b/charts/modules/mysql/charts/common/templates/validations/_mariadb.tpl @@ -0,0 +1,103 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate MariaDB required passwords are not empty. + +Usage: +{{ include "common.validations.values.mariadb.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where MariaDB values are stored, e.g: "mysql-passwords-secret" + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.mariadb.passwords" -}} + {{- $existingSecret := include "common.mariadb.values.auth.existingSecret" . -}} + {{- $enabled := include "common.mariadb.values.enabled" . -}} + {{- $architecture := include "common.mariadb.values.architecture" . -}} + {{- $authPrefix := include "common.mariadb.values.key.auth" . 
-}} + {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}} + {{- $valueKeyUsername := printf "%s.username" $authPrefix -}} + {{- $valueKeyPassword := printf "%s.password" $authPrefix -}} + {{- $valueKeyReplicationPassword := printf "%s.replicationPassword" $authPrefix -}} + + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mariadb-root-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}} + + {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }} + {{- if not (empty $valueUsername) -}} + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mariadb-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + {{- end -}} + + {{- if (eq $architecture "replication") -}} + {{- $requiredReplicationPassword := dict "valueKey" $valueKeyReplicationPassword "secret" .secret "field" "mariadb-replication-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredReplicationPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.mariadb.values.auth.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.mariadb.values.auth.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.mariadb.auth.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.auth.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled mariadb. + +Usage: +{{ include "common.mariadb.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.mariadb.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.mariadb.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for architecture + +Usage: +{{ include "common.mariadb.values.architecture" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.mariadb.values.architecture" -}} + {{- if .subchart -}} + {{- .context.Values.mariadb.architecture -}} + {{- else -}} + {{- .context.Values.architecture -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key auth + +Usage: +{{ include "common.mariadb.values.key.auth" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. 
Default: false +*/}} +{{- define "common.mariadb.values.key.auth" -}} + {{- if .subchart -}} + mariadb.auth + {{- else -}} + auth + {{- end -}} +{{- end -}} diff --git a/charts/modules/mysql/charts/common/templates/validations/_mongodb.tpl b/charts/modules/mysql/charts/common/templates/validations/_mongodb.tpl new file mode 100644 index 0000000..f820ec1 --- /dev/null +++ b/charts/modules/mysql/charts/common/templates/validations/_mongodb.tpl @@ -0,0 +1,108 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate MongoDB® required passwords are not empty. + +Usage: +{{ include "common.validations.values.mongodb.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where MongoDB® values are stored, e.g: "mongodb-passwords-secret" + - subchart - Boolean - Optional. Whether MongoDB® is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.mongodb.passwords" -}} + {{- $existingSecret := include "common.mongodb.values.auth.existingSecret" . -}} + {{- $enabled := include "common.mongodb.values.enabled" . -}} + {{- $authPrefix := include "common.mongodb.values.key.auth" . -}} + {{- $architecture := include "common.mongodb.values.architecture" . -}} + {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}} + {{- $valueKeyUsername := printf "%s.username" $authPrefix -}} + {{- $valueKeyDatabase := printf "%s.database" $authPrefix -}} + {{- $valueKeyPassword := printf "%s.password" $authPrefix -}} + {{- $valueKeyReplicaSetKey := printf "%s.replicaSetKey" $authPrefix -}} + {{- $valueKeyAuthEnabled := printf "%s.enabled" $authPrefix -}} + + {{- $authEnabled := include "common.utils.getValueFromKey" (dict "key" $valueKeyAuthEnabled "context" .context) -}} + + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") (eq $authEnabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mongodb-root-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}} + + {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }} + {{- $valueDatabase := include "common.utils.getValueFromKey" (dict "key" $valueKeyDatabase "context" .context) }} + {{- if and $valueUsername $valueDatabase -}} + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mongodb-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + {{- end -}} + + {{- if (eq $architecture "replicaset") -}} + {{- $requiredReplicaSetKey := dict "valueKey" $valueKeyReplicaSetKey "secret" .secret "field" "mongodb-replica-set-key" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredReplicaSetKey -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.mongodb.values.auth.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MongoDb is used as subchart or not. 
Default: false +*/}} +{{- define "common.mongodb.values.auth.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.mongodb.auth.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.auth.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled mongodb. + +Usage: +{{ include "common.mongodb.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.mongodb.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.mongodb.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key auth + +Usage: +{{ include "common.mongodb.values.key.auth" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MongoDB® is used as subchart or not. Default: false +*/}} +{{- define "common.mongodb.values.key.auth" -}} + {{- if .subchart -}} + mongodb.auth + {{- else -}} + auth + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for architecture + +Usage: +{{ include "common.mongodb.values.architecture" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MongoDB® is used as subchart or not. Default: false +*/}} +{{- define "common.mongodb.values.architecture" -}} + {{- if .subchart -}} + {{- .context.Values.mongodb.architecture -}} + {{- else -}} + {{- .context.Values.architecture -}} + {{- end -}} +{{- end -}} diff --git a/charts/modules/mysql/charts/common/templates/validations/_mysql.tpl b/charts/modules/mysql/charts/common/templates/validations/_mysql.tpl new file mode 100644 index 0000000..74472a0 --- /dev/null +++ b/charts/modules/mysql/charts/common/templates/validations/_mysql.tpl @@ -0,0 +1,103 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate MySQL required passwords are not empty. + +Usage: +{{ include "common.validations.values.mysql.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where MySQL values are stored, e.g: "mysql-passwords-secret" + - subchart - Boolean - Optional. Whether MySQL is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.mysql.passwords" -}} + {{- $existingSecret := include "common.mysql.values.auth.existingSecret" . -}} + {{- $enabled := include "common.mysql.values.enabled" . -}} + {{- $architecture := include "common.mysql.values.architecture" . -}} + {{- $authPrefix := include "common.mysql.values.key.auth" . 
-}} + {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}} + {{- $valueKeyUsername := printf "%s.username" $authPrefix -}} + {{- $valueKeyPassword := printf "%s.password" $authPrefix -}} + {{- $valueKeyReplicationPassword := printf "%s.replicationPassword" $authPrefix -}} + + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mysql-root-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}} + + {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }} + {{- if not (empty $valueUsername) -}} + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mysql-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + {{- end -}} + + {{- if (eq $architecture "replication") -}} + {{- $requiredReplicationPassword := dict "valueKey" $valueKeyReplicationPassword "secret" .secret "field" "mysql-replication-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredReplicationPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.mysql.values.auth.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MySQL is used as subchart or not. Default: false +*/}} +{{- define "common.mysql.values.auth.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.mysql.auth.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.auth.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled mysql. + +Usage: +{{ include "common.mysql.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.mysql.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.mysql.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for architecture + +Usage: +{{ include "common.mysql.values.architecture" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MySQL is used as subchart or not. Default: false +*/}} +{{- define "common.mysql.values.architecture" -}} + {{- if .subchart -}} + {{- .context.Values.mysql.architecture -}} + {{- else -}} + {{- .context.Values.architecture -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key auth + +Usage: +{{ include "common.mysql.values.key.auth" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MySQL is used as subchart or not. 
Default: false +*/}} +{{- define "common.mysql.values.key.auth" -}} + {{- if .subchart -}} + mysql.auth + {{- else -}} + auth + {{- end -}} +{{- end -}} diff --git a/charts/modules/mysql/charts/common/templates/validations/_postgresql.tpl b/charts/modules/mysql/charts/common/templates/validations/_postgresql.tpl new file mode 100644 index 0000000..164ec0d --- /dev/null +++ b/charts/modules/mysql/charts/common/templates/validations/_postgresql.tpl @@ -0,0 +1,129 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate PostgreSQL required passwords are not empty. + +Usage: +{{ include "common.validations.values.postgresql.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where postgresql values are stored, e.g: "postgresql-passwords-secret" + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.postgresql.passwords" -}} + {{- $existingSecret := include "common.postgresql.values.existingSecret" . -}} + {{- $enabled := include "common.postgresql.values.enabled" . -}} + {{- $valueKeyPostgresqlPassword := include "common.postgresql.values.key.postgressPassword" . -}} + {{- $valueKeyPostgresqlReplicationEnabled := include "common.postgresql.values.key.replicationPassword" . -}} + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + {{- $requiredPostgresqlPassword := dict "valueKey" $valueKeyPostgresqlPassword "secret" .secret "field" "postgresql-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPostgresqlPassword -}} + + {{- $enabledReplication := include "common.postgresql.values.enabled.replication" . -}} + {{- if (eq $enabledReplication "true") -}} + {{- $requiredPostgresqlReplicationPassword := dict "valueKey" $valueKeyPostgresqlReplicationEnabled "secret" .secret "field" "postgresql-replication-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPostgresqlReplicationPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to decide whether evaluate global values. + +Usage: +{{ include "common.postgresql.values.use.global" (dict "key" "key-of-global" "context" $) }} +Params: + - key - String - Required. Field to be evaluated within global, e.g: "existingSecret" +*/}} +{{- define "common.postgresql.values.use.global" -}} + {{- if .context.Values.global -}} + {{- if .context.Values.global.postgresql -}} + {{- index .context.Values.global.postgresql .key | quote -}} + {{- end -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.postgresql.values.existingSecret" (dict "context" $) }} +*/}} +{{- define "common.postgresql.values.existingSecret" -}} + {{- $globalValue := include "common.postgresql.values.use.global" (dict "key" "existingSecret" "context" .context) -}} + + {{- if .subchart -}} + {{- default (.context.Values.postgresql.existingSecret | quote) $globalValue -}} + {{- else -}} + {{- default (.context.Values.existingSecret | quote) $globalValue -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled postgresql. 
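+Note: when "subchart" is not set this returns the negation of the top-level .Values.enabled, matching the
+pattern used by the equivalent Cassandra/MariaDB/MongoDB/MySQL helpers in this validations directory.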
+ +Usage: +{{ include "common.postgresql.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.postgresql.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.postgresql.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key postgressPassword. + +Usage: +{{ include "common.postgresql.values.key.postgressPassword" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.postgresql.values.key.postgressPassword" -}} + {{- $globalValue := include "common.postgresql.values.use.global" (dict "key" "postgresqlUsername" "context" .context) -}} + + {{- if not $globalValue -}} + {{- if .subchart -}} + postgresql.postgresqlPassword + {{- else -}} + postgresqlPassword + {{- end -}} + {{- else -}} + global.postgresql.postgresqlPassword + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled.replication. + +Usage: +{{ include "common.postgresql.values.enabled.replication" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.postgresql.values.enabled.replication" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.postgresql.replication.enabled -}} + {{- else -}} + {{- printf "%v" .context.Values.replication.enabled -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key replication.password. + +Usage: +{{ include "common.postgresql.values.key.replicationPassword" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.postgresql.values.key.replicationPassword" -}} + {{- if .subchart -}} + postgresql.replication.password + {{- else -}} + replication.password + {{- end -}} +{{- end -}} diff --git a/charts/modules/mysql/charts/common/templates/validations/_redis.tpl b/charts/modules/mysql/charts/common/templates/validations/_redis.tpl new file mode 100644 index 0000000..dcccfc1 --- /dev/null +++ b/charts/modules/mysql/charts/common/templates/validations/_redis.tpl @@ -0,0 +1,76 @@ + +{{/* vim: set filetype=mustache: */}} +{{/* +Validate Redis® required passwords are not empty. + +Usage: +{{ include "common.validations.values.redis.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where redis values are stored, e.g: "redis-passwords-secret" + - subchart - Boolean - Optional. Whether redis is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.redis.passwords" -}} + {{- $enabled := include "common.redis.values.enabled" . -}} + {{- $valueKeyPrefix := include "common.redis.values.keys.prefix" . -}} + {{- $standarizedVersion := include "common.redis.values.standarized.version" . 
}} + + {{- $existingSecret := ternary (printf "%s%s" $valueKeyPrefix "auth.existingSecret") (printf "%s%s" $valueKeyPrefix "existingSecret") (eq $standarizedVersion "true") }} + {{- $existingSecretValue := include "common.utils.getValueFromKey" (dict "key" $existingSecret "context" .context) }} + + {{- $valueKeyRedisPassword := ternary (printf "%s%s" $valueKeyPrefix "auth.password") (printf "%s%s" $valueKeyPrefix "password") (eq $standarizedVersion "true") }} + {{- $valueKeyRedisUseAuth := ternary (printf "%s%s" $valueKeyPrefix "auth.enabled") (printf "%s%s" $valueKeyPrefix "usePassword") (eq $standarizedVersion "true") }} + + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $useAuth := include "common.utils.getValueFromKey" (dict "key" $valueKeyRedisUseAuth "context" .context) -}} + {{- if eq $useAuth "true" -}} + {{- $requiredRedisPassword := dict "valueKey" $valueKeyRedisPassword "secret" .secret "field" "redis-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRedisPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled redis. + +Usage: +{{ include "common.redis.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.redis.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.redis.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right prefix path for the values + +Usage: +{{ include "common.redis.values.key.prefix" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether redis is used as subchart or not. Default: false +*/}} +{{- define "common.redis.values.keys.prefix" -}} + {{- if .subchart -}}redis.{{- else -}}{{- end -}} +{{- end -}} + +{{/* +Checks whether the redis chart's includes the standarizations (version >= 14) + +Usage: +{{ include "common.redis.values.standarized.version" (dict "context" $) }} +*/}} +{{- define "common.redis.values.standarized.version" -}} + + {{- $standarizedAuth := printf "%s%s" (include "common.redis.values.keys.prefix" .) "auth" -}} + {{- $standarizedAuthValues := include "common.utils.getValueFromKey" (dict "key" $standarizedAuth "context" .context) }} + + {{- if $standarizedAuthValues -}} + {{- true -}} + {{- end -}} +{{- end -}} diff --git a/charts/modules/mysql/charts/common/templates/validations/_validations.tpl b/charts/modules/mysql/charts/common/templates/validations/_validations.tpl new file mode 100644 index 0000000..9a814cf --- /dev/null +++ b/charts/modules/mysql/charts/common/templates/validations/_validations.tpl @@ -0,0 +1,46 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate values must not be empty. + +Usage: +{{- $validateValueConf00 := (dict "valueKey" "path.to.value" "secret" "secretName" "field" "password-00") -}} +{{- $validateValueConf01 := (dict "valueKey" "path.to.value" "secret" "secretName" "field" "password-01") -}} +{{ include "common.validations.values.empty" (dict "required" (list $validateValueConf00 $validateValueConf01) "context" $) }} + +Validate value params: + - valueKey - String - Required. The path to the validating value in the values.yaml, e.g: "mysql.password" + - secret - String - Optional. 
Name of the secret where the validating value is generated/stored, e.g: "mysql-passwords-secret" + - field - String - Optional. Name of the field in the secret data, e.g: "mysql-password" +*/}} +{{- define "common.validations.values.multiple.empty" -}} + {{- range .required -}} + {{- include "common.validations.values.single.empty" (dict "valueKey" .valueKey "secret" .secret "field" .field "context" $.context) -}} + {{- end -}} +{{- end -}} + +{{/* +Validate a value must not be empty. + +Usage: +{{ include "common.validations.value.empty" (dict "valueKey" "mariadb.password" "secret" "secretName" "field" "my-password" "subchart" "subchart" "context" $) }} + +Validate value params: + - valueKey - String - Required. The path to the validating value in the values.yaml, e.g: "mysql.password" + - secret - String - Optional. Name of the secret where the validating value is generated/stored, e.g: "mysql-passwords-secret" + - field - String - Optional. Name of the field in the secret data, e.g: "mysql-password" + - subchart - String - Optional - Name of the subchart that the validated password is part of. +*/}} +{{- define "common.validations.values.single.empty" -}} + {{- $value := include "common.utils.getValueFromKey" (dict "key" .valueKey "context" .context) }} + {{- $subchart := ternary "" (printf "%s." .subchart) (empty .subchart) }} + + {{- if not $value -}} + {{- $varname := "my-value" -}} + {{- $getCurrentValue := "" -}} + {{- if and .secret .field -}} + {{- $varname = include "common.utils.fieldToEnvVar" . -}} + {{- $getCurrentValue = printf " To get the current value:\n\n %s\n" (include "common.utils.secret.getvalue" .) -}} + {{- end -}} + {{- printf "\n '%s' must not be empty, please add '--set %s%s=$%s' to the command.%s" .valueKey $subchart .valueKey $varname $getCurrentValue -}} + {{- end -}} +{{- end -}} diff --git a/charts/modules/mysql/charts/common/values.yaml b/charts/modules/mysql/charts/common/values.yaml new file mode 100644 index 0000000..f2df68e --- /dev/null +++ b/charts/modules/mysql/charts/common/values.yaml @@ -0,0 +1,5 @@ +## bitnami/common +## It is required by CI/CD tools and processes. +## @skip exampleValue +## +exampleValue: common-chart diff --git a/charts/modules/mysql/templates/NOTES.txt b/charts/modules/mysql/templates/NOTES.txt new file mode 100644 index 0000000..ecf604c --- /dev/null +++ b/charts/modules/mysql/templates/NOTES.txt @@ -0,0 +1,75 @@ +CHART NAME: {{ .Chart.Name }} +CHART VERSION: {{ .Chart.Version }} +APP VERSION: {{ .Chart.AppVersion }} + +** Please be patient while the chart is being deployed ** + +{{- if .Values.diagnosticMode.enabled }} +The chart has been deployed in diagnostic mode. All probes have been disabled and the command has been overwritten with: + + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 4 }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 4 }} + +Get the list of pods by executing: + + kubectl get pods --namespace {{ include "common.names.namespace" . }} -l app.kubernetes.io/instance={{ .Release.Name }} + +Access the pod you want to debug by executing + + kubectl exec --namespace {{ include "common.names.namespace" . 
}} -ti -- bash + +In order to replicate the container startup scripts execute this command: + + /opt/bitnami/scripts/mysql/entrypoint.sh /opt/bitnami/scripts/mysql/run.sh + +{{- else }} + +Tip: + + Watch the deployment status using the command: kubectl get pods -w --namespace {{ include "common.names.namespace" . }} + +Services: + + echo Primary: {{ include "mysql.primary.fullname" . }}.{{ include "common.names.namespace" . }}.svc.{{ .Values.clusterDomain }}:{{ .Values.primary.service.ports.mysql }} +{{- if eq .Values.architecture "replication" }} + echo Secondary: {{ include "mysql.secondary.fullname" . }}.{{ include "common.names.namespace" . }}.svc.{{ .Values.clusterDomain }}:{{ .Values.secondary.service.ports.mysql }} +{{- end }} + +Execute the following to get the administrator credentials: + + echo Username: root + MYSQL_ROOT_PASSWORD=$(kubectl get secret --namespace {{ include "common.names.namespace" . }} {{ template "mysql.secretName" . }} -o jsonpath="{.data.mysql-root-password}" | base64 -d) + +To connect to your database: + + 1. Run a pod that you can use as a client: + + kubectl run {{ include "common.names.fullname" . }}-client --rm --tty -i --restart='Never' --image {{ template "mysql.image" . }} --namespace {{ include "common.names.namespace" . }} --env MYSQL_ROOT_PASSWORD=$MYSQL_ROOT_PASSWORD --command -- bash + + 2. To connect to primary service (read/write): + + mysql -h {{ include "mysql.primary.fullname" . }}.{{ include "common.names.namespace" . }}.svc.{{ .Values.clusterDomain }} -uroot -p"$MYSQL_ROOT_PASSWORD" + +{{- if eq .Values.architecture "replication" }} + + 3. To connect to secondary service (read-only): + + mysql -h {{ include "mysql.secondary.fullname" . }}.{{ include "common.names.namespace" . }}.svc.{{ .Values.clusterDomain }} -uroot -p"$MYSQL_ROOT_PASSWORD" +{{- end }} + +{{ if and (.Values.networkPolicy.enabled) (not .Values.networkPolicy.allowExternal) }} +Note: Since NetworkPolicy is enabled, only pods with label {{ template "common.names.fullname" . }}-client=true" will be able to connect to MySQL. +{{- end }} + +{{- if .Values.metrics.enabled }} + +To access the MySQL Prometheus metrics from outside the cluster execute the following commands: + + kubectl port-forward --namespace {{ include "common.names.namespace" . }} svc/{{ printf "%s-metrics" (include "common.names.fullname" .) }} {{ .Values.metrics.service.port }}:{{ .Values.metrics.service.port }} & + curl http://127.0.0.1:{{ .Values.metrics.service.port }}/metrics + +{{- end }} + +{{ include "mysql.validateValues" . }} +{{ include "mysql.checkRollingTags" . }} +{{- end }} diff --git a/charts/modules/mysql/templates/_helpers.tpl b/charts/modules/mysql/templates/_helpers.tpl new file mode 100644 index 0000000..322826f --- /dev/null +++ b/charts/modules/mysql/templates/_helpers.tpl @@ -0,0 +1,161 @@ +{{/* vim: set filetype=mustache: */}} + +{{- define "mysql.primary.fullname" -}} +{{- if eq .Values.architecture "replication" }} +{{- printf "%s-%s" (include "common.names.fullname" .) .Values.primary.name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- include "common.names.fullname" . -}} +{{- end -}} +{{- end -}} + +{{- define "mysql.secondary.fullname" -}} +{{- printf "%s-%s" (include "common.names.fullname" .) 
.Values.secondary.name | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Return the proper MySQL image name +*/}} +{{- define "mysql.image" -}} +{{- include "common.images.image" (dict "imageRoot" .Values.image "global" .Values.global) }} +{{- end -}} + +{{/* +Return the proper metrics image name +*/}} +{{- define "mysql.metrics.image" -}} +{{- include "common.images.image" (dict "imageRoot" .Values.metrics.image "global" .Values.global) }} +{{- end -}} + +{{/* +Return the proper image name (for the init container volume-permissions image) +*/}} +{{- define "mysql.volumePermissions.image" -}} +{{- include "common.images.image" (dict "imageRoot" .Values.volumePermissions.image "global" .Values.global) }} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names +*/}} +{{- define "mysql.imagePullSecrets" -}} +{{- include "common.images.pullSecrets" (dict "images" (list .Values.image .Values.metrics.image .Values.volumePermissions.image) "global" .Values.global) }} +{{- end -}} + +{{/* +Get the initialization scripts ConfigMap name. +*/}} +{{- define "mysql.initdbScriptsCM" -}} +{{- if .Values.initdbScriptsConfigMap -}} + {{- printf "%s" (tpl .Values.initdbScriptsConfigMap $) -}} +{{- else -}} + {{- printf "%s-init-scripts" (include "mysql.primary.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* + Returns the proper service account name depending if an explicit service account name is set + in the values file. If the name is not set it will default to either mysql.fullname if serviceAccount.create + is true or default otherwise. +*/}} +{{- define "mysql.serviceAccountName" -}} + {{- if .Values.serviceAccount.create -}} + {{ default (include "common.names.fullname" .) .Values.serviceAccount.name }} + {{- else -}} + {{ default "default" .Values.serviceAccount.name }} + {{- end -}} +{{- end -}} + +{{/* +Return the configmap with the MySQL Primary configuration +*/}} +{{- define "mysql.primary.configmapName" -}} +{{- if .Values.primary.existingConfigmap -}} + {{- printf "%s" (tpl .Values.primary.existingConfigmap $) -}} +{{- else -}} + {{- printf "%s" (include "mysql.primary.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if a configmap object should be created for MySQL Secondary +*/}} +{{- define "mysql.primary.createConfigmap" -}} +{{- if and .Values.primary.configuration (not .Values.primary.existingConfigmap) }} + {{- true -}} +{{- else -}} +{{- end -}} +{{- end -}} + +{{/* +Return the configmap with the MySQL Primary configuration +*/}} +{{- define "mysql.secondary.configmapName" -}} +{{- if .Values.secondary.existingConfigmap -}} + {{- printf "%s" (tpl .Values.secondary.existingConfigmap $) -}} +{{- else -}} + {{- printf "%s" (include "mysql.secondary.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if a configmap object should be created for MySQL Secondary +*/}} +{{- define "mysql.secondary.createConfigmap" -}} +{{- if and (eq .Values.architecture "replication") .Values.secondary.configuration (not .Values.secondary.existingConfigmap) }} + {{- true -}} +{{- else -}} +{{- end -}} +{{- end -}} + +{{/* +Return the secret with MySQL credentials +*/}} +{{- define "mysql.secretName" -}} + {{- if .Values.auth.existingSecret -}} + {{- printf "%s" (tpl .Values.auth.existingSecret $) -}} + {{- else -}} + {{- printf "%s" (include "common.names.fullname" .) 
-}} + {{- end -}} +{{- end -}} + +{{/* +Return true if a secret object should be created for MySQL +*/}} +{{- define "mysql.createSecret" -}} +{{- if and (not .Values.auth.existingSecret) (not .Values.auth.customPasswordFiles) }} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Returns the available value for certain key in an existing secret (if it exists), +otherwise it generates a random value. +*/}} +{{- define "getValueFromSecret" }} + {{- $len := (default 16 .Length) | int -}} + {{- $obj := (lookup "v1" "Secret" .Namespace .Name).data -}} + {{- if $obj }} + {{- index $obj .Key | b64dec -}} + {{- else -}} + {{- randAlphaNum $len -}} + {{- end -}} +{{- end }} + +{{/* Check if there are rolling tags in the images */}} +{{- define "mysql.checkRollingTags" -}} +{{- include "common.warnings.rollingTag" .Values.image }} +{{- include "common.warnings.rollingTag" .Values.metrics.image }} +{{- include "common.warnings.rollingTag" .Values.volumePermissions.image }} +{{- end -}} + +{{/* +Compile all warnings into a single message, and call fail. +*/}} +{{- define "mysql.validateValues" -}} +{{- $messages := list -}} +{{- $messages := without $messages "" -}} +{{- $message := join "\n" $messages -}} + +{{- if $message -}} +{{- printf "\nVALUES VALIDATION:\n%s" $message | fail -}} +{{- end -}} +{{- end -}} diff --git a/charts/modules/mysql/templates/extra-list.yaml b/charts/modules/mysql/templates/extra-list.yaml new file mode 100644 index 0000000..9ac65f9 --- /dev/null +++ b/charts/modules/mysql/templates/extra-list.yaml @@ -0,0 +1,4 @@ +{{- range .Values.extraDeploy }} +--- +{{ include "common.tplvalues.render" (dict "value" . "context" $) }} +{{- end }} diff --git a/charts/modules/mysql/templates/metrics-svc.yaml b/charts/modules/mysql/templates/metrics-svc.yaml new file mode 100644 index 0000000..4d3339b --- /dev/null +++ b/charts/modules/mysql/templates/metrics-svc.yaml @@ -0,0 +1,29 @@ +{{- if .Values.metrics.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ printf "%s-metrics" (include "common.names.fullname" .) }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + app.kubernetes.io/component: metrics + {{- if or .Values.metrics.service.annotations .Values.commonAnnotations }} + annotations: + {{- if .Values.metrics.service.annotations }} + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.service.annotations "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} +spec: + type: {{ .Values.metrics.service.type }} + ports: + - port: {{ .Values.metrics.service.port }} + targetPort: metrics + protocol: TCP + name: metrics + selector: {{- include "common.labels.matchLabels" $ | nindent 4 }} +{{- end }} diff --git a/charts/modules/mysql/templates/networkpolicy.yaml b/charts/modules/mysql/templates/networkpolicy.yaml new file mode 100644 index 0000000..6b62bb5 --- /dev/null +++ b/charts/modules/mysql/templates/networkpolicy.yaml @@ -0,0 +1,40 @@ +{{- if .Values.networkPolicy.enabled }} +kind: NetworkPolicy +apiVersion: {{ template "common.capabilities.networkPolicy.apiVersion" . }} +metadata: + name: {{ template "common.names.fullname" . 
}} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + podSelector: + matchLabels: + {{- include "common.labels.matchLabels" . | nindent 6 }} + ingress: + # Allow inbound connections + - ports: + - port: {{ .Values.primary.service.ports.mysql }} + {{- if not .Values.networkPolicy.allowExternal }} + from: + - podSelector: + matchLabels: + {{ template "common.names.fullname" . }}-client: "true" + {{- if .Values.networkPolicy.explicitNamespacesSelector }} + namespaceSelector: +{{ toYaml .Values.networkPolicy.explicitNamespacesSelector | indent 12 }} + {{- end }} + - podSelector: + matchLabels: + {{- include "common.labels.matchLabels" . | nindent 14 }} + {{- end }} + {{- if .Values.metrics.enabled }} + # Allow prometheus scrapes + - ports: + - port: 9104 + {{- end }} +{{- end }} diff --git a/charts/modules/mysql/templates/primary/configmap.yaml b/charts/modules/mysql/templates/primary/configmap.yaml new file mode 100644 index 0000000..82d0774 --- /dev/null +++ b/charts/modules/mysql/templates/primary/configmap.yaml @@ -0,0 +1,18 @@ +{{- if (include "mysql.primary.createConfigmap" .) }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "mysql.primary.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: primary + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: + my.cnf: |- + {{- include "common.tplvalues.render" ( dict "value" .Values.primary.configuration "context" $ ) | nindent 4 }} +{{- end -}} diff --git a/charts/modules/mysql/templates/primary/initialization-configmap.yaml b/charts/modules/mysql/templates/primary/initialization-configmap.yaml new file mode 100644 index 0000000..a34f80d --- /dev/null +++ b/charts/modules/mysql/templates/primary/initialization-configmap.yaml @@ -0,0 +1,17 @@ +{{- if and .Values.initdbScripts (not .Values.initdbScriptsConfigMap) }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ printf "%s-init-scripts" (include "mysql.primary.fullname" .) }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: primary + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: +{{- include "common.tplvalues.render" (dict "value" .Values.initdbScripts "context" .) 
| nindent 2 }} +{{- end }} diff --git a/charts/modules/mysql/templates/primary/pdb.yaml b/charts/modules/mysql/templates/primary/pdb.yaml new file mode 100644 index 0000000..ca22a0e --- /dev/null +++ b/charts/modules/mysql/templates/primary/pdb.yaml @@ -0,0 +1,25 @@ +{{- if .Values.primary.pdb.create }} +apiVersion: {{ include "common.capabilities.policy.apiVersion" . }} +kind: PodDisruptionBudget +metadata: + name: {{ include "mysql.primary.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: primary + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + {{- if .Values.primary.pdb.minAvailable }} + minAvailable: {{ .Values.primary.pdb.minAvailable }} + {{- end }} + {{- if .Values.primary.pdb.maxUnavailable }} + maxUnavailable: {{ .Values.primary.pdb.maxUnavailable }} + {{- end }} + selector: + matchLabels: {{ include "common.labels.matchLabels" . | nindent 6 }} + app.kubernetes.io/component: primary +{{- end }} diff --git a/charts/modules/mysql/templates/primary/statefulset.yaml b/charts/modules/mysql/templates/primary/statefulset.yaml new file mode 100644 index 0000000..7be0254 --- /dev/null +++ b/charts/modules/mysql/templates/primary/statefulset.yaml @@ -0,0 +1,382 @@ +apiVersion: {{ include "common.capabilities.statefulset.apiVersion" . }} +kind: StatefulSet +metadata: + name: {{ include "mysql.primary.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: primary + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + replicas: 1 + podManagementPolicy: {{ .Values.primary.podManagementPolicy | quote }} + selector: + matchLabels: {{ include "common.labels.matchLabels" . | nindent 6 }} + app.kubernetes.io/component: primary + serviceName: {{ include "mysql.primary.fullname" . }} + {{- if .Values.primary.updateStrategy }} + updateStrategy: {{- toYaml .Values.primary.updateStrategy | nindent 4 }} + {{- end }} + template: + metadata: + annotations: + {{- if (include "mysql.primary.createConfigmap" .) }} + checksum/configuration: {{ include (print $.Template.BasePath "/primary/configmap.yaml") . | sha256sum }} + {{- end }} + {{- if .Values.primary.podAnnotations }} + {{- include "common.tplvalues.render" (dict "value" .Values.primary.podAnnotations "context" $) | nindent 8 }} + {{- end }} + labels: {{- include "common.labels.standard" . | nindent 8 }} + app.kubernetes.io/component: primary + {{- if .Values.primary.podLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.primary.podLabels "context" $ ) | nindent 8 }} + {{- end }} + spec: + serviceAccountName: {{ template "mysql.serviceAccountName" . }} + {{- include "mysql.imagePullSecrets" . 
| nindent 6 }} + {{- if .Values.primary.hostAliases }} + hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.primary.hostAliases "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.primary.affinity }} + affinity: {{- include "common.tplvalues.render" (dict "value" .Values.primary.affinity "context" $) | nindent 8 }} + {{- else }} + affinity: + podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.primary.podAffinityPreset "context" $) | nindent 10 }} + podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.primary.podAntiAffinityPreset "context" $) | nindent 10 }} + nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.primary.nodeAffinityPreset.type "key" .Values.primary.nodeAffinityPreset.key "values" .Values.primary.nodeAffinityPreset.values) | nindent 10 }} + {{- end }} + {{- if .Values.primary.nodeSelector }} + nodeSelector: {{- include "common.tplvalues.render" (dict "value" .Values.primary.nodeSelector "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.primary.tolerations }} + tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.primary.tolerations "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.primary.priorityClassName }} + priorityClassName: {{ .Values.primary.priorityClassName | quote }} + {{- end }} + {{- if .Values.primary.runtimeClassName }} + runtimeClassName: {{ .Values.primary.runtimeClassName | quote }} + {{- end }} + {{- if .Values.primary.schedulerName }} + schedulerName: {{ .Values.primary.schedulerName | quote }} + {{- end }} + {{- if .Values.primary.topologySpreadConstraints }} + topologySpreadConstraints: {{- include "common.tplvalues.render" (dict "value" .Values.primary.topologySpreadConstraints "context" .) | nindent 8 }} + {{- end }} + {{- if .Values.primary.podSecurityContext.enabled }} + securityContext: {{- omit .Values.primary.podSecurityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + {{- if .Values.primary.terminationGracePeriodSeconds }} + terminationGracePeriodSeconds: {{ .Values.primary.terminationGracePeriodSeconds }} + {{- end }} + initContainers: + {{- if and .Values.primary.podSecurityContext.enabled .Values.volumePermissions.enabled .Values.primary.persistence.enabled }} + - name: volume-permissions + image: {{ include "mysql.volumePermissions.image" . }} + imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} + command: + - /bin/bash + - -ec + - | + mkdir -p "/bitnami/mysql" + chown "{{ .Values.primary.containerSecurityContext.runAsUser }}:{{ .Values.primary.podSecurityContext.fsGroup }}" "/bitnami/mysql" + find "/bitnami/mysql" -mindepth 1 -maxdepth 1 -not -name ".snapshot" -not -name "lost+found" | xargs -r chown -R "{{ .Values.primary.containerSecurityContext.runAsUser }}:{{ .Values.primary.podSecurityContext.fsGroup }}" + securityContext: + runAsUser: 0 + {{- if .Values.volumePermissions.resources }} + resources: {{- toYaml .Values.volumePermissions.resources | nindent 12 }} + {{- end }} + volumeMounts: + - name: data + mountPath: /bitnami/mysql + {{- if .Values.primary.persistence.subPath }} + subPath: {{ .Values.primary.persistence.subPath }} + {{- end }} + {{- end }} + {{- if .Values.primary.initContainers }} + {{- include "common.tplvalues.render" (dict "value" .Values.primary.initContainers "context" $) | nindent 8 }} + {{- end }} + containers: + - name: mysql + image: {{ include "mysql.image" . 
}} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if .Values.primary.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.primary.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + {{- else if .Values.primary.command }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.primary.command "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- else if .Values.primary.args }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.primary.args "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.primary.lifecycleHooks }} + lifecycle: {{- include "common.tplvalues.render" (dict "value" .Values.primary.lifecycleHooks "context" $) | nindent 12 }} + {{- end }} + env: + - name: BITNAMI_DEBUG + value: {{ ternary "true" "false" (or .Values.image.debug .Values.diagnosticMode.enabled) | quote }} + {{- if .Values.auth.usePasswordFiles }} + - name: MYSQL_ROOT_PASSWORD_FILE + value: {{ default "/opt/bitnami/mysql/secrets/mysql-root-password" .Values.auth.customPasswordFiles.root }} + {{- else }} + - name: MYSQL_ROOT_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "mysql.secretName" . }} + key: mysql-root-password + {{- end }} + {{- if not (empty .Values.auth.username) }} + - name: MYSQL_USER + value: {{ .Values.auth.username | quote }} + {{- if .Values.auth.usePasswordFiles }} + - name: MYSQL_PASSWORD_FILE + value: {{ default "/opt/bitnami/mysql/secrets/mysql-password" .Values.auth.customPasswordFiles.user }} + {{- else }} + - name: MYSQL_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "mysql.secretName" . }} + key: mysql-password + {{- end }} + {{- end }} + {{- if and .Values.auth.createDatabase .Values.auth.database }} + - name: MYSQL_DATABASE + value: {{ .Values.auth.database | quote }} + {{- end }} + {{- if eq .Values.architecture "replication" }} + - name: MYSQL_REPLICATION_MODE + value: "master" + - name: MYSQL_REPLICATION_USER + value: {{ .Values.auth.replicationUser | quote }} + {{- if .Values.auth.usePasswordFiles }} + - name: MYSQL_REPLICATION_PASSWORD_FILE + value: {{ default "/opt/bitnami/mysql/secrets/mysql-replication-password" .Values.auth.customPasswordFiles.replicator }} + {{- else }} + - name: MYSQL_REPLICATION_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "mysql.secretName" . 
}} + key: mysql-replication-password + {{- end }} + {{- end }} + {{- if .Values.primary.extraFlags }} + - name: MYSQL_EXTRA_FLAGS + value: "{{ .Values.primary.extraFlags }}" + {{- end }} + {{- if .Values.primary.extraEnvVars }} + {{- include "common.tplvalues.render" (dict "value" .Values.primary.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + envFrom: + {{- if .Values.primary.extraEnvVarsCM }} + - configMapRef: + name: {{ include "common.tplvalues.render" (dict "value" .Values.primary.extraEnvVarsCM "context" $) }} + {{- end }} + {{- if .Values.primary.extraEnvVarsSecret }} + - secretRef: + name: {{ include "common.tplvalues.render" (dict "value" .Values.primary.extraEnvVarsSecret "context" $) }} + {{- end }} + ports: + - name: mysql + containerPort: 3306 + {{- if not .Values.diagnosticMode.enabled }} + {{- if .Values.primary.customLivenessProbe }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.primary.customLivenessProbe "context" $) | nindent 12 }} + {{- else if .Values.primary.livenessProbe.enabled }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.primary.livenessProbe "enabled") "context" $) | nindent 12 }} + exec: + command: + - /bin/bash + - -ec + - | + password_aux="${MYSQL_ROOT_PASSWORD:-}" + if [[ -f "${MYSQL_ROOT_PASSWORD_FILE:-}" ]]; then + password_aux=$(cat "$MYSQL_ROOT_PASSWORD_FILE") + fi + mysqladmin status -uroot -p"${password_aux}" + {{- end }} + {{- if .Values.primary.customReadinessProbe }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.primary.customReadinessProbe "context" $) | nindent 12 }} + {{- else if .Values.primary.readinessProbe.enabled }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.primary.readinessProbe "enabled") "context" $) | nindent 12 }} + exec: + command: + - /bin/bash + - -ec + - | + password_aux="${MYSQL_ROOT_PASSWORD:-}" + if [[ -f "${MYSQL_ROOT_PASSWORD_FILE:-}" ]]; then + password_aux=$(cat "$MYSQL_ROOT_PASSWORD_FILE") + fi + mysqladmin status -uroot -p"${password_aux}" + {{- end }} + {{- if .Values.primary.customStartupProbe }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.primary.customStartupProbe "context" $) | nindent 12 }} + {{- else if .Values.primary.startupProbe.enabled }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.primary.startupProbe "enabled") "context" $) | nindent 12 }} + exec: + command: + - /bin/bash + - -ec + - | + password_aux="${MYSQL_ROOT_PASSWORD:-}" + if [[ -f "${MYSQL_ROOT_PASSWORD_FILE:-}" ]]; then + password_aux=$(cat "$MYSQL_ROOT_PASSWORD_FILE") + fi + mysqladmin status -uroot -p"${password_aux}" + {{- end }} + {{- end }} + {{- if .Values.primary.resources }} + resources: {{ toYaml .Values.primary.resources | nindent 12 }} + {{- end }} + volumeMounts: + - name: data + mountPath: /bitnami/mysql + {{- if .Values.primary.persistence.subPath }} + subPath: {{ .Values.primary.persistence.subPath }} + {{- end }} + {{- if or .Values.initdbScriptsConfigMap .Values.initdbScripts }} + - name: custom-init-scripts + mountPath: /docker-entrypoint-initdb.d + {{- end }} + {{- if or .Values.primary.configuration .Values.primary.existingConfigmap }} + - name: config + mountPath: /opt/bitnami/mysql/conf/my.cnf + subPath: my.cnf + {{- end }} + {{- if and .Values.auth.usePasswordFiles (not .Values.auth.customPasswordFiles) }} + - name: mysql-credentials + mountPath: /opt/bitnami/mysql/secrets/ + {{- end }} + {{- if 
.Values.primary.extraVolumeMounts }} + {{- include "common.tplvalues.render" (dict "value" .Values.primary.extraVolumeMounts "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.metrics.enabled }} + - name: metrics + image: {{ include "mysql.metrics.image" . }} + imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }} + env: + {{- if .Values.auth.usePasswordFiles }} + - name: MYSQL_ROOT_PASSWORD_FILE + value: {{ default "/opt/bitnami/mysqld-exporter/secrets/mysql-root-password" .Values.auth.customPasswordFiles.root }} + {{- else }} + - name: MYSQL_ROOT_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "mysql.secretName" . }} + key: mysql-root-password + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- else }} + command: + - /bin/bash + - -ec + - | + password_aux="${MYSQL_ROOT_PASSWORD:-}" + if [[ -f "${MYSQL_ROOT_PASSWORD_FILE:-}" ]]; then + password_aux=$(cat "$MYSQL_ROOT_PASSWORD_FILE") + fi + DATA_SOURCE_NAME="root:${password_aux}@(localhost:3306)/" /bin/mysqld_exporter {{- range .Values.metrics.extraArgs.primary }} {{ . }} {{- end }} + {{- end }} + ports: + - name: metrics + containerPort: 9104 + {{- if not .Values.diagnosticMode.enabled }} + {{- if .Values.metrics.livenessProbe.enabled }} + livenessProbe: {{- omit .Values.metrics.livenessProbe "enabled" | toYaml | nindent 12 }} + httpGet: + path: /metrics + port: metrics + {{- end }} + {{- if .Values.metrics.readinessProbe.enabled }} + readinessProbe: {{- omit .Values.metrics.readinessProbe "enabled" | toYaml | nindent 12 }} + httpGet: + path: /metrics + port: metrics + {{- end }} + {{- end }} + {{- if .Values.metrics.resources }} + resources: {{- toYaml .Values.metrics.resources | nindent 12 }} + {{- end }} + {{- if and .Values.auth.usePasswordFiles (not .Values.auth.customPasswordFiles) }} + volumeMounts: + - name: mysql-credentials + mountPath: /opt/bitnami/mysqld-exporter/secrets/ + {{- end }} + {{- end }} + {{- if .Values.primary.sidecars }} + {{- include "common.tplvalues.render" (dict "value" .Values.primary.sidecars "context" $) | nindent 8 }} + {{- end }} + volumes: + {{- if or .Values.primary.configuration .Values.primary.existingConfigmap }} + - name: config + configMap: + name: {{ include "mysql.primary.configmapName" . }} + {{- end }} + {{- if or .Values.initdbScriptsConfigMap .Values.initdbScripts }} + - name: custom-init-scripts + configMap: + name: {{ include "mysql.initdbScriptsCM" . }} + {{- end }} + {{- if and .Values.auth.usePasswordFiles (not .Values.auth.customPasswordFiles) }} + - name: mysql-credentials + secret: + secretName: {{ include "mysql.secretName" . }} + items: + - key: mysql-root-password + path: mysql-root-password + - key: mysql-password + path: mysql-password + {{- if eq .Values.architecture "replication" }} + - key: mysql-replication-password + path: mysql-replication-password + {{- end }} + {{- end }} + {{- if .Values.primary.extraVolumes }} + {{- include "common.tplvalues.render" (dict "value" .Values.primary.extraVolumes "context" $) | nindent 8 }} + {{- end }} + {{- if and .Values.primary.persistence.enabled .Values.primary.persistence.existingClaim }} + - name: data + persistentVolumeClaim: + claimName: {{ tpl .Values.primary.persistence.existingClaim . 
}} + {{- else if not .Values.primary.persistence.enabled }} + - name: data + emptyDir: {} + {{- else if and .Values.primary.persistence.enabled (not .Values.primary.persistence.existingClaim) }} + volumeClaimTemplates: + - metadata: + name: data + labels: {{ include "common.labels.matchLabels" . | nindent 10 }} + app.kubernetes.io/component: primary + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" (dict "value" .Values.commonLabels "context" $) | nindent 10 }} + {{- end }} + annotations: + {{- if .Values.primary.persistence.annotations }} + {{- include "common.tplvalues.render" (dict "value" .Values.primary.persistence.annotations "context" $) | nindent 10 }} + {{- end }} + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" (dict "value" .Values.commonAnnotations "context" $) | nindent 10 }} + {{- end }} + spec: + accessModes: + {{- range .Values.primary.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.primary.persistence.size | quote }} + {{- include "common.storage.class" (dict "persistence" .Values.primary.persistence "global" .Values.global) | nindent 8 }} + {{- if .Values.primary.persistence.selector }} + selector: {{- include "common.tplvalues.render" (dict "value" .Values.primary.persistence.selector "context" $) | nindent 10 }} + {{- end -}} + {{- end }} diff --git a/charts/modules/mysql/templates/primary/svc-headless.yaml b/charts/modules/mysql/templates/primary/svc-headless.yaml new file mode 100644 index 0000000..c430d94 --- /dev/null +++ b/charts/modules/mysql/templates/primary/svc-headless.yaml @@ -0,0 +1,29 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "mysql.primary.fullname" . }}-headless + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: primary + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if or .Values.primary.service.headless.annotations .Values.commonAnnotations }} + annotations: + {{- if .Values.primary.service.headless.annotations }} + {{- include "common.tplvalues.render" (dict "value" .Values.primary.service.headless.annotations "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} +spec: + type: ClusterIP + clusterIP: None + publishNotReadyAddresses: true + ports: + - name: mysql + port: {{ .Values.primary.service.ports.mysql }} + targetPort: mysql + selector: {{ include "common.labels.matchLabels" . | nindent 4 }} + app.kubernetes.io/component: primary diff --git a/charts/modules/mysql/templates/primary/svc.yaml b/charts/modules/mysql/templates/primary/svc.yaml new file mode 100644 index 0000000..b61d453 --- /dev/null +++ b/charts/modules/mysql/templates/primary/svc.yaml @@ -0,0 +1,52 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "mysql.primary.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + app.kubernetes.io/component: primary + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + annotations: + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.primary.service.annotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.primary.service.annotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + type: {{ .Values.primary.service.type }} + {{- if and .Values.primary.service.clusterIP (eq .Values.primary.service.type "ClusterIP") }} + clusterIP: {{ .Values.primary.service.clusterIP }} + {{- end }} + {{- if .Values.primary.service.sessionAffinity }} + sessionAffinity: {{ .Values.primary.service.sessionAffinity }} + {{- end }} + {{- if .Values.primary.service.sessionAffinityConfig }} + sessionAffinityConfig: {{- include "common.tplvalues.render" (dict "value" .Values.primary.service.sessionAffinityConfig "context" $) | nindent 4 }} + {{- end }} + {{- if or (eq .Values.primary.service.type "LoadBalancer") (eq .Values.primary.service.type "NodePort") }} + externalTrafficPolicy: {{ .Values.primary.service.externalTrafficPolicy | quote }} + {{- end }} + {{- if and (eq .Values.primary.service.type "LoadBalancer") (not (empty .Values.primary.service.loadBalancerSourceRanges)) }} + loadBalancerSourceRanges: {{ .Values.primary.service.loadBalancerSourceRanges }} + {{- end }} + {{- if and (eq .Values.primary.service.type "LoadBalancer") (not (empty .Values.primary.service.loadBalancerIP)) }} + loadBalancerIP: {{ .Values.primary.service.loadBalancerIP }} + {{- end }} + ports: + - name: mysql + port: {{ .Values.primary.service.ports.mysql }} + protocol: TCP + targetPort: mysql + {{- if (and (or (eq .Values.primary.service.type "NodePort") (eq .Values.primary.service.type "LoadBalancer")) .Values.primary.service.nodePorts.mysql) }} + nodePort: {{ .Values.primary.service.nodePorts.mysql }} + {{- else if eq .Values.primary.service.type "ClusterIP" }} + nodePort: null + {{- end }} + {{- if .Values.primary.service.extraPorts }} + {{- include "common.tplvalues.render" (dict "value" .Values.primary.service.extraPorts "context" $) | nindent 4 }} + {{- end }} + selector: {{ include "common.labels.matchLabels" . | nindent 4 }} + app.kubernetes.io/component: primary diff --git a/charts/modules/mysql/templates/prometheusrule.yaml b/charts/modules/mysql/templates/prometheusrule.yaml new file mode 100644 index 0000000..64fa44f --- /dev/null +++ b/charts/modules/mysql/templates/prometheusrule.yaml @@ -0,0 +1,22 @@ +{{- if and .Values.metrics.enabled .Values.metrics.prometheusRule.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: {{ include "common.names.fullname" . }} + namespace: {{ default .Release.Namespace .Values.metrics.prometheusRule.namespace }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + app.kubernetes.io/component: metrics + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.metrics.prometheusRule.additionalLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.prometheusRule.additionalLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + groups: + - name: {{ include "common.names.fullname" . }} + rules: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.prometheusRule.rules "context" $ ) | nindent 6 }} +{{- end }} diff --git a/charts/modules/mysql/templates/role.yaml b/charts/modules/mysql/templates/role.yaml new file mode 100644 index 0000000..1ccc00a --- /dev/null +++ b/charts/modules/mysql/templates/role.yaml @@ -0,0 +1,24 @@ +{{- if and .Values.serviceAccount.create .Values.rbac.create }} +apiVersion: {{ include "common.capabilities.rbac.apiVersion" . }} +kind: Role +metadata: + name: {{ include "common.names.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +rules: + - apiGroups: + - "" + resources: + - endpoints + verbs: + - get + {{- if .Values.rbac.rules }} + {{- include "common.tplvalues.render" ( dict "value" .Values.rbac.rules "context" $ ) | nindent 2 }} + {{- end }} +{{- end }} diff --git a/charts/modules/mysql/templates/rolebinding.yaml b/charts/modules/mysql/templates/rolebinding.yaml new file mode 100644 index 0000000..9b05208 --- /dev/null +++ b/charts/modules/mysql/templates/rolebinding.yaml @@ -0,0 +1,21 @@ +{{- if and .Values.serviceAccount.create .Values.rbac.create }} +kind: RoleBinding +apiVersion: {{ include "common.capabilities.rbac.apiVersion" . }} +metadata: + name: {{ include "common.names.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +subjects: + - kind: ServiceAccount + name: {{ include "mysql.serviceAccountName" . }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ include "common.names.fullname" . -}} +{{- end }} diff --git a/charts/modules/mysql/templates/secondary/configmap.yaml b/charts/modules/mysql/templates/secondary/configmap.yaml new file mode 100644 index 0000000..c94724f --- /dev/null +++ b/charts/modules/mysql/templates/secondary/configmap.yaml @@ -0,0 +1,18 @@ +{{- if (include "mysql.secondary.createConfigmap" .) }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "mysql.secondary.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + app.kubernetes.io/component: secondary + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: + my.cnf: |- + {{- include "common.tplvalues.render" ( dict "value" .Values.secondary.configuration "context" $ ) | nindent 4 }} +{{- end -}} diff --git a/charts/modules/mysql/templates/secondary/pdb.yaml b/charts/modules/mysql/templates/secondary/pdb.yaml new file mode 100644 index 0000000..b4a5aee --- /dev/null +++ b/charts/modules/mysql/templates/secondary/pdb.yaml @@ -0,0 +1,25 @@ +{{- if and (eq .Values.architecture "replication") .Values.secondary.pdb.create }} +apiVersion: {{ include "common.capabilities.policy.apiVersion" . }} +kind: PodDisruptionBudget +metadata: + name: {{ include "mysql.secondary.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: secondary + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + {{- if .Values.secondary.pdb.minAvailable }} + minAvailable: {{ .Values.secondary.pdb.minAvailable }} + {{- end }} + {{- if .Values.secondary.pdb.maxUnavailable }} + maxUnavailable: {{ .Values.secondary.pdb.maxUnavailable }} + {{- end }} + selector: + matchLabels: {{ include "common.labels.matchLabels" . | nindent 6 }} + app.kubernetes.io/component: secondary +{{- end }} diff --git a/charts/modules/mysql/templates/secondary/statefulset.yaml b/charts/modules/mysql/templates/secondary/statefulset.yaml new file mode 100644 index 0000000..3b731b5 --- /dev/null +++ b/charts/modules/mysql/templates/secondary/statefulset.yaml @@ -0,0 +1,363 @@ +{{- if eq .Values.architecture "replication" }} +apiVersion: {{ include "common.capabilities.statefulset.apiVersion" . }} +kind: StatefulSet +metadata: + name: {{ include "mysql.secondary.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: secondary + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + replicas: {{ .Values.secondary.replicaCount }} + podManagementPolicy: {{ .Values.secondary.podManagementPolicy | quote }} + selector: + matchLabels: {{ include "common.labels.matchLabels" . | nindent 6 }} + app.kubernetes.io/component: secondary + serviceName: {{ include "mysql.secondary.fullname" . }} + {{- if .Values.secondary.updateStrategy }} + updateStrategy: {{- toYaml .Values.secondary.updateStrategy | nindent 4 }} + {{- end }} + template: + metadata: + annotations: + {{- if (include "mysql.secondary.createConfigmap" .) }} + checksum/configuration: {{ include (print $.Template.BasePath "/secondary/configmap.yaml") . 
| sha256sum }} + {{- end }} + {{- if .Values.secondary.podAnnotations }} + {{- include "common.tplvalues.render" (dict "value" .Values.secondary.podAnnotations "context" $) | nindent 8 }} + {{- end }} + labels: {{- include "common.labels.standard" . | nindent 8 }} + app.kubernetes.io/component: secondary + {{- if .Values.secondary.podLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.secondary.podLabels "context" $ ) | nindent 8 }} + {{- end }} + spec: + serviceAccountName: {{ include "mysql.serviceAccountName" . }} + {{- include "mysql.imagePullSecrets" . | nindent 6 }} + {{- if .Values.secondary.hostAliases }} + hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.secondary.hostAliases "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.secondary.affinity }} + affinity: {{- include "common.tplvalues.render" (dict "value" .Values.secondary.affinity "context" $) | nindent 8 }} + {{- else }} + affinity: + podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.secondary.podAffinityPreset "context" $) | nindent 10 }} + podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.secondary.podAntiAffinityPreset "context" $) | nindent 10 }} + nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.secondary.nodeAffinityPreset.type "key" .Values.secondary.nodeAffinityPreset.key "values" .Values.secondary.nodeAffinityPreset.values) | nindent 10 }} + {{- end }} + {{- if .Values.secondary.nodeSelector }} + nodeSelector: {{- include "common.tplvalues.render" (dict "value" .Values.secondary.nodeSelector "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.secondary.tolerations }} + tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.secondary.tolerations "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.secondary.priorityClassName }} + priorityClassName: {{ .Values.secondary.priorityClassName | quote }} + {{- end }} + {{- if .Values.secondary.runtimeClassName }} + runtimeClassName: {{ .Values.secondary.runtimeClassName | quote }} + {{- end }} + {{- if .Values.secondary.schedulerName }} + schedulerName: {{ .Values.secondary.schedulerName | quote }} + {{- end }} + {{- if .Values.secondary.topologySpreadConstraints }} + topologySpreadConstraints: {{- include "common.tplvalues.render" (dict "value" .Values.secondary.topologySpreadConstraints "context" .) | nindent 8 }} + {{- end }} + {{- if .Values.secondary.podSecurityContext.enabled }} + securityContext: {{- omit .Values.secondary.podSecurityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + {{- if .Values.secondary.terminationGracePeriodSeconds }} + terminationGracePeriodSeconds: {{ .Values.secondary.terminationGracePeriodSeconds }} + {{- end }} + initContainers: + {{- if and .Values.secondary.podSecurityContext.enabled .Values.volumePermissions.enabled .Values.secondary.persistence.enabled }} + - name: volume-permissions + image: {{ include "mysql.volumePermissions.image" . 
}} + imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} + command: + - /bin/bash + - -ec + - | + mkdir -p "/bitnami/mysql" + chown "{{ .Values.secondary.containerSecurityContext.runAsUser }}:{{ .Values.secondary.podSecurityContext.fsGroup }}" "/bitnami/mysql" + find "/bitnami/mysql" -mindepth 1 -maxdepth 1 -not -name ".snapshot" -not -name "lost+found" | xargs -r chown -R "{{ .Values.secondary.containerSecurityContext.runAsUser }}:{{ .Values.secondary.podSecurityContext.fsGroup }}" + securityContext: + runAsUser: 0 + {{- if .Values.volumePermissions.resources }} + resources: {{- toYaml .Values.volumePermissions.resources | nindent 12 }} + {{- end }} + volumeMounts: + - name: data + mountPath: /bitnami/mysql + {{- if .Values.secondary.persistence.subPath }} + subPath: {{ .Values.secondary.persistence.subPath }} + {{- end }} + {{- end }} + {{- if .Values.secondary.initContainers }} + {{- include "common.tplvalues.render" (dict "value" .Values.secondary.initContainers "context" $) | nindent 8 }} + {{- end }} + containers: + - name: mysql + image: {{ include "mysql.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if .Values.secondary.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.secondary.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + {{- else if .Values.secondary.command }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.secondary.command "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- else if .Values.secondary.args }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.secondary.args "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.secondary.lifecycleHooks }} + lifecycle: {{- include "common.tplvalues.render" (dict "value" .Values.secondary.lifecycleHooks "context" $) | nindent 12 }} + {{- end }} + env: + - name: BITNAMI_DEBUG + value: {{ ternary "true" "false" (or .Values.image.debug .Values.diagnosticMode.enabled) | quote }} + - name: MYSQL_REPLICATION_MODE + value: "slave" + - name: MYSQL_MASTER_HOST + value: {{ include "mysql.primary.fullname" . }} + - name: MYSQL_MASTER_PORT_NUMBER + value: {{ .Values.primary.service.ports.mysql | quote }} + - name: MYSQL_MASTER_ROOT_USER + value: "root" + - name: MYSQL_REPLICATION_USER + value: {{ .Values.auth.replicationUser | quote }} + {{- if .Values.auth.usePasswordFiles }} + - name: MYSQL_MASTER_ROOT_PASSWORD_FILE + value: {{ default "/opt/bitnami/mysql/secrets/mysql-root-password" .Values.auth.customPasswordFiles.root }} + - name: MYSQL_REPLICATION_PASSWORD_FILE + value: {{ default "/opt/bitnami/mysql/secrets/mysql-replication-password" .Values.auth.customPasswordFiles.replicator }} + {{- else }} + - name: MYSQL_MASTER_ROOT_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "mysql.secretName" . }} + key: mysql-root-password + - name: MYSQL_REPLICATION_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "mysql.secretName" . 
}} + key: mysql-replication-password + {{- end }} + {{- if .Values.secondary.extraFlags }} + - name: MYSQL_EXTRA_FLAGS + value: "{{ .Values.secondary.extraFlags }}" + {{- end }} + {{- if .Values.secondary.extraEnvVars }} + {{- include "common.tplvalues.render" (dict "value" .Values.secondary.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + envFrom: + {{- if .Values.secondary.extraEnvVarsCM }} + - configMapRef: + name: {{ include "common.tplvalues.render" (dict "value" .Values.secondary.extraEnvVarsCM "context" $) }} + {{- end }} + {{- if .Values.secondary.extraEnvVarsSecret }} + - secretRef: + name: {{ include "common.tplvalues.render" (dict "value" .Values.secondary.extraEnvVarsSecret "context" $) }} + {{- end }} + ports: + - name: mysql + containerPort: 3306 + {{- if not .Values.diagnosticMode.enabled }} + {{- if .Values.secondary.customLivenessProbe }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.secondary.customLivenessProbe "context" $) | nindent 12 }} + {{- else if .Values.secondary.livenessProbe.enabled }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.secondary.livenessProbe "enabled") "context" $) | nindent 12 }} + exec: + command: + - /bin/bash + - -ec + - | + password_aux="${MYSQL_MASTER_ROOT_PASSWORD:-}" + if [[ -f "${MYSQL_MASTER_ROOT_PASSWORD_FILE:-}" ]]; then + password_aux=$(cat "$MYSQL_MASTER_ROOT_PASSWORD_FILE") + fi + mysqladmin status -uroot -p"${password_aux}" + {{- end }} + {{- if .Values.secondary.customReadinessProbe }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.secondary.customReadinessProbe "context" $) | nindent 12 }} + {{- else if .Values.secondary.readinessProbe.enabled }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.secondary.readinessProbe "enabled") "context" $) | nindent 12 }} + exec: + command: + - /bin/bash + - -ec + - | + password_aux="${MYSQL_MASTER_ROOT_PASSWORD:-}" + if [[ -f "${MYSQL_MASTER_ROOT_PASSWORD_FILE:-}" ]]; then + password_aux=$(cat "$MYSQL_MASTER_ROOT_PASSWORD_FILE") + fi + mysqladmin status -uroot -p"${password_aux}" + {{- end }} + {{- if .Values.secondary.customStartupProbe }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.secondary.customStartupProbe "context" $) | nindent 12 }} + {{- else if .Values.secondary.startupProbe.enabled }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.secondary.startupProbe "enabled") "context" $) | nindent 12 }} + exec: + command: + - /bin/bash + - -ec + - | + password_aux="${MYSQL_MASTER_ROOT_PASSWORD:-}" + if [[ -f "${MYSQL_MASTER_ROOT_PASSWORD_FILE:-}" ]]; then + password_aux=$(cat "$MYSQL_MASTER_ROOT_PASSWORD_FILE") + fi + mysqladmin status -uroot -p"${password_aux}" + {{- end }} + {{- end }} + {{- if .Values.secondary.resources }} + resources: {{ toYaml .Values.secondary.resources | nindent 12 }} + {{- end }} + volumeMounts: + - name: data + mountPath: /bitnami/mysql + {{- if .Values.secondary.persistence.subPath }} + subPath: {{ .Values.secondary.persistence.subPath }} + {{- end }} + {{- if or .Values.initdbScriptsConfigMap .Values.initdbScripts }} + - name: custom-init-scripts + mountPath: /docker-entrypoint-initdb.d + {{- end }} + {{- if or .Values.secondary.configuration .Values.secondary.existingConfigmap }} + - name: config + mountPath: /opt/bitnami/mysql/conf/my.cnf + subPath: my.cnf + {{- end }} + {{- if and .Values.auth.usePasswordFiles (not .Values.auth.customPasswordFiles) }} + - 
name: mysql-credentials + mountPath: /opt/bitnami/mysql/secrets/ + {{- end }} + {{- if .Values.secondary.extraVolumeMounts }} + {{- include "common.tplvalues.render" (dict "value" .Values.secondary.extraVolumeMounts "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.metrics.enabled }} + - name: metrics + image: {{ include "mysql.metrics.image" . }} + imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }} + env: + {{- if .Values.auth.usePasswordFiles }} + - name: MYSQL_ROOT_PASSWORD_FILE + value: {{ default "/opt/bitnami/mysqld-exporter/secrets/mysql-root-password" .Values.auth.customPasswordFiles.root }} + {{- else }} + - name: MYSQL_ROOT_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "mysql.secretName" . }} + key: mysql-root-password + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- else }} + command: + - /bin/bash + - -ec + - | + password_aux="${MYSQL_ROOT_PASSWORD:-}" + if [[ -f "${MYSQL_ROOT_PASSWORD_FILE:-}" ]]; then + password_aux=$(cat "$MYSQL_ROOT_PASSWORD_FILE") + fi + DATA_SOURCE_NAME="root:${password_aux}@(localhost:3306)/" /bin/mysqld_exporter {{- range .Values.metrics.extraArgs.secondary }} {{ . }} {{- end }} + {{- end }} + ports: + - name: metrics + containerPort: 9104 + {{- if not .Values.diagnosticMode.enabled }} + {{- if .Values.metrics.livenessProbe.enabled }} + livenessProbe: {{- omit .Values.metrics.livenessProbe "enabled" | toYaml | nindent 12 }} + httpGet: + path: /metrics + port: metrics + {{- end }} + {{- if .Values.metrics.readinessProbe.enabled }} + readinessProbe: {{- omit .Values.metrics.readinessProbe "enabled" | toYaml | nindent 12 }} + httpGet: + path: /metrics + port: metrics + {{- end }} + {{- end }} + {{- if .Values.metrics.resources }} + resources: {{- toYaml .Values.metrics.resources | nindent 12 }} + {{- end }} + {{- if and .Values.auth.usePasswordFiles (not .Values.auth.customPasswordFiles) }} + volumeMounts: + - name: mysql-credentials + mountPath: /opt/bitnami/mysqld-exporter/secrets/ + {{- end }} + {{- end }} + {{- if .Values.secondary.sidecars }} + {{- include "common.tplvalues.render" (dict "value" .Values.secondary.sidecars "context" $) | nindent 8 }} + {{- end }} + volumes: + {{- if or .Values.initdbScriptsConfigMap .Values.initdbScripts }} + - name: custom-init-scripts + configMap: + name: {{ include "mysql.initdbScriptsCM" . }} + {{- end }} + {{- if or .Values.secondary.configuration .Values.secondary.existingConfigmap }} + - name: config + configMap: + name: {{ include "mysql.secondary.configmapName" . }} + {{- end }} + {{- if and .Values.auth.usePasswordFiles (not .Values.auth.customPasswordFiles) }} + - name: mysql-credentials + secret: + secretName: {{ template "mysql.secretName" . }} + items: + - key: mysql-root-password + path: mysql-root-password + - key: mysql-replication-password + path: mysql-replication-password + {{- end }} + {{- if .Values.secondary.extraVolumes }} + {{- include "common.tplvalues.render" (dict "value" .Values.secondary.extraVolumes "context" $) | nindent 8 }} + {{- end }} + {{- if and .Values.secondary.persistence.enabled .Values.secondary.persistence.existingClaim }} + - name: data + persistentVolumeClaim: + claimName: {{ tpl .Values.secondary.persistence.existingClaim . 
}} + {{- else if not .Values.secondary.persistence.enabled }} + - name: data + emptyDir: {} + {{- else }} + volumeClaimTemplates: + - metadata: + name: data + labels: {{ include "common.labels.matchLabels" . | nindent 10 }} + app.kubernetes.io/component: secondary + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" (dict "value" .Values.commonLabels "context" $) | nindent 10 }} + {{- end }} + annotations: + {{- if .Values.secondary.persistence.annotations }} + {{- include "common.tplvalues.render" (dict "value" .Values.secondary.persistence.annotations "context" $) | nindent 10 }} + {{- end }} + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" (dict "value" .Values.commonAnnotations "context" $) | nindent 10 }} + {{- end }} + spec: + accessModes: + {{- range .Values.secondary.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.secondary.persistence.size | quote }} + {{- include "common.storage.class" (dict "persistence" .Values.secondary.persistence "global" .Values.global) | nindent 8 }} + {{- if .Values.secondary.persistence.selector }} + selector: {{- include "common.tplvalues.render" (dict "value" .Values.secondary.persistence.selector "context" $) | nindent 10 }} + {{- end -}} + {{- end }} +{{- end }} diff --git a/charts/modules/mysql/templates/secondary/svc-headless.yaml b/charts/modules/mysql/templates/secondary/svc-headless.yaml new file mode 100644 index 0000000..44cfa4a --- /dev/null +++ b/charts/modules/mysql/templates/secondary/svc-headless.yaml @@ -0,0 +1,31 @@ +{{- if eq .Values.architecture "replication" }} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "mysql.secondary.fullname" . }}-headless + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: secondary + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if or .Values.secondary.service.headless.annotations .Values.commonAnnotations }} + annotations: + {{- if .Values.secondary.service.headless.annotations }} + {{- include "common.tplvalues.render" (dict "value" .Values.secondary.service.headless.annotations "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} +spec: + type: ClusterIP + clusterIP: None + publishNotReadyAddresses: true + ports: + - name: mysql + port: {{ .Values.secondary.service.ports.mysql }} + targetPort: mysql + selector: {{ include "common.labels.matchLabels" . | nindent 4 }} + app.kubernetes.io/component: secondary +{{- end }} diff --git a/charts/modules/mysql/templates/secondary/svc.yaml b/charts/modules/mysql/templates/secondary/svc.yaml new file mode 100644 index 0000000..e6e662c --- /dev/null +++ b/charts/modules/mysql/templates/secondary/svc.yaml @@ -0,0 +1,54 @@ +{{- if eq .Values.architecture "replication" }} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "mysql.secondary.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + app.kubernetes.io/component: secondary + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + annotations: + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.secondary.service.annotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.secondary.service.annotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + type: {{ .Values.secondary.service.type }} + {{- if and .Values.secondary.service.clusterIP (eq .Values.secondary.service.type "ClusterIP") }} + clusterIP: {{ .Values.secondary.service.clusterIP }} + {{- end }} + {{- if .Values.secondary.service.sessionAffinity }} + sessionAffinity: {{ .Values.secondary.service.sessionAffinity }} + {{- end }} + {{- if .Values.secondary.service.sessionAffinityConfig }} + sessionAffinityConfig: {{- include "common.tplvalues.render" (dict "value" .Values.secondary.service.sessionAffinityConfig "context" $) | nindent 4 }} + {{- end }} + {{- if or (eq .Values.secondary.service.type "LoadBalancer") (eq .Values.secondary.service.type "NodePort") }} + externalTrafficPolicy: {{ .Values.secondary.service.externalTrafficPolicy | quote }} + {{- end }} + {{- if and (eq .Values.secondary.service.type "LoadBalancer") (not (empty .Values.secondary.service.loadBalancerSourceRanges)) }} + loadBalancerSourceRanges: {{ .Values.secondary.service.loadBalancerSourceRanges }} + {{- end }} + {{- if and (eq .Values.secondary.service.type "LoadBalancer") (not (empty .Values.secondary.service.loadBalancerIP)) }} + loadBalancerIP: {{ .Values.secondary.service.loadBalancerIP }} + {{- end }} + ports: + - name: mysql + port: {{ .Values.secondary.service.ports.mysql }} + protocol: TCP + targetPort: mysql + {{- if (and (or (eq .Values.secondary.service.type "NodePort") (eq .Values.secondary.service.type "LoadBalancer")) .Values.secondary.service.nodePorts.mysql) }} + nodePort: {{ .Values.secondary.service.nodePorts.mysql }} + {{- else if eq .Values.secondary.service.type "ClusterIP" }} + nodePort: null + {{- end }} + {{- if .Values.secondary.service.extraPorts }} + {{- include "common.tplvalues.render" (dict "value" .Values.secondary.service.extraPorts "context" $) | nindent 4 }} + {{- end }} + selector: {{ include "common.labels.matchLabels" . | nindent 4 }} + app.kubernetes.io/component: secondary +{{- end }} diff --git a/charts/modules/mysql/templates/secrets.yaml b/charts/modules/mysql/templates/secrets.yaml new file mode 100644 index 0000000..6da5327 --- /dev/null +++ b/charts/modules/mysql/templates/secrets.yaml @@ -0,0 +1,21 @@ +{{- if eq (include "mysql.createSecret" .) "true" }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "common.names.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: Opaque +data: + mysql-root-password: {{ include "common.secrets.passwords.manage" (dict "secret" (include "common.names.fullname" .) 
"key" "mysql-root-password" "length" 10 "providedValues" (list "auth.rootPassword") "context" $) }} + mysql-password: {{ include "common.secrets.passwords.manage" (dict "secret" (include "common.names.fullname" .) "key" "mysql-password" "length" 10 "providedValues" (list "auth.password") "context" $) }} + {{- if eq .Values.architecture "replication" }} + mysql-replication-password: {{ include "common.secrets.passwords.manage" (dict "secret" (include "common.names.fullname" .) "key" "mysql-replication-password" "length" 10 "providedValues" (list "auth.replicationPassword") "context" $) }} + {{- end }} +{{- end }} diff --git a/charts/modules/mysql/templates/serviceaccount.yaml b/charts/modules/mysql/templates/serviceaccount.yaml new file mode 100644 index 0000000..5044961 --- /dev/null +++ b/charts/modules/mysql/templates/serviceaccount.yaml @@ -0,0 +1,23 @@ +{{- if .Values.serviceAccount.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "mysql.serviceAccountName" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + annotations: + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.serviceAccount.annotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.serviceAccount.annotations "context" $ ) | nindent 4 }} + {{- end }} +automountServiceAccountToken: {{ .Values.serviceAccount.automountServiceAccountToken }} +{{- if (not .Values.auth.customPasswordFiles) }} +secrets: + - name: {{ template "mysql.secretName" . }} +{{- end }} +{{- end }} diff --git a/charts/modules/mysql/templates/servicemonitor.yaml b/charts/modules/mysql/templates/servicemonitor.yaml new file mode 100644 index 0000000..47a9dad --- /dev/null +++ b/charts/modules/mysql/templates/servicemonitor.yaml @@ -0,0 +1,49 @@ +{{- if and .Values.metrics.enabled .Values.metrics.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "common.names.fullname" . }} + namespace: {{ default (include "common.names.namespace" .) .Values.metrics.serviceMonitor.namespace }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + {{- if .Values.metrics.serviceMonitor.labels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.labels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + annotations: + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.annotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.annotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + jobLabel: {{ .Values.metrics.serviceMonitor.jobLabel | quote }} + endpoints: + - port: metrics + {{- if .Values.metrics.serviceMonitor.interval }} + interval: {{ .Values.metrics.serviceMonitor.interval }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.metrics.serviceMonitor.scrapeTimeout }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.honorLabels }} + honorLabels: {{ .Values.metrics.serviceMonitor.honorLabels }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.metricRelabelings }} + metricRelabelings: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.metricRelabelings "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.relabelings }} + relabelings: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.relabelings "context" $) | nindent 8 }} + {{- end }} + namespaceSelector: + matchNames: + - {{ include "common.names.namespace" . | quote }} + selector: + matchLabels: {{- include "common.labels.matchLabels" . 
| nindent 6 }} + app.kubernetes.io/component: metrics + {{- if .Values.metrics.serviceMonitor.selector }} + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.serviceMonitor.selector "context" $) | nindent 6 }} + {{- end }} +{{- end }} diff --git a/charts/modules/mysql/values.schema.json b/charts/modules/mysql/values.schema.json new file mode 100644 index 0000000..df59156 --- /dev/null +++ b/charts/modules/mysql/values.schema.json @@ -0,0 +1,195 @@ +{ + "$schema": "http://json-schema.org/schema#", + "type": "object", + "properties": { + "architecture": { + "type": "string", + "title": "MySQL architecture", + "form": true, + "description": "Allowed values: `standalone` or `replication`", + "enum": ["standalone", "replication"] + }, + "auth": { + "type": "object", + "title": "Authentication configuration", + "form": true, + "required": ["username", "password"], + "if": { + "properties": { + "createDatabase": { "enum": [ true ] } + } + }, + "then": { + "properties": { + "database": { + "pattern": "[a-zA-Z0-9]{1,64}" + } + } + }, + "properties": { + "rootPassword": { + "type": "string", + "title": "MySQL root password", + "description": "Defaults to a random 10-character alphanumeric string if not set" + }, + "database": { + "type": "string", + "title": "MySQL custom database name", + "maxLength": 64 + }, + "username": { + "type": "string", + "title": "MySQL custom username" + }, + "password": { + "type": "string", + "title": "MySQL custom password" + }, + "replicationUser": { + "type": "string", + "title": "MySQL replication username" + }, + "replicationPassword": { + "type": "string", + "title": "MySQL replication password" + }, + "createDatabase": { + "type": "boolean", + "title": "MySQL create custom database" + } + } + }, + "primary": { + "type": "object", + "title": "Primary database configuration", + "form": true, + "properties": { + "podSecurityContext": { + "type": "object", + "title": "MySQL primary Pod security context", + "properties": { + "enabled": { + "type": "boolean", + "default": false + }, + "fsGroup": { + "type": "integer", + "default": 1001, + "hidden": { + "value": false, + "path": "primary/podSecurityContext/enabled" + } + } + } + }, + "containerSecurityContext": { + "type": "object", + "title": "MySQL primary container security context", + "properties": { + "enabled": { + "type": "boolean", + "default": false + }, + "runAsUser": { + "type": "integer", + "default": 1001, + "hidden": { + "value": false, + "path": "primary/containerSecurityContext/enabled" + } + } + } + }, + "persistence": { + "type": "object", + "title": "Enable persistence using Persistent Volume Claims", + "properties": { + "enabled": { + "type": "boolean", + "default": true, + "title": "If true, use a Persistent Volume Claim, If false, use emptyDir" + }, + "size": { + "type": "string", + "title": "Persistent Volume Size", + "form": true, + "render": "slider", + "sliderMin": 1, + "sliderUnit": "Gi", + "hidden": { + "value": false, + "path": "primary/persistence/enabled" + } + } + } + } + } + }, + "secondary": { + "type": "object", + "title": "Secondary database configuration", + "form": true, + "properties": { + "podSecurityContext": { + "type": "object", + "title": "MySQL secondary Pod security context", + "properties": { + "enabled": { + "type": "boolean", + "default": false + }, + "fsGroup": { + "type": "integer", + "default": 1001, + "hidden": { + "value": false, + "path": "secondary/podSecurityContext/enabled" + } + } + } + }, + "containerSecurityContext": { + "type": "object", + 
"title": "MySQL secondary container security context", + "properties": { + "enabled": { + "type": "boolean", + "default": false + }, + "runAsUser": { + "type": "integer", + "default": 1001, + "hidden": { + "value": false, + "path": "secondary/containerSecurityContext/enabled" + } + } + } + }, + "persistence": { + "type": "object", + "title": "Enable persistence using Persistent Volume Claims", + "properties": { + "enabled": { + "type": "boolean", + "default": true, + "title": "If true, use a Persistent Volume Claim, If false, use emptyDir" + }, + "size": { + "type": "string", + "title": "Persistent Volume Size", + "form": true, + "render": "slider", + "sliderMin": 1, + "sliderUnit": "Gi", + "hidden": { + "value": false, + "path": "secondary/persistence/enabled" + } + } + } + } + } + } + } +} diff --git a/charts/modules/mysql/values.yaml b/charts/modules/mysql/values.yaml new file mode 100644 index 0000000..e153de6 --- /dev/null +++ b/charts/modules/mysql/values.yaml @@ -0,0 +1,1208 @@ +## @section Global parameters +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry, imagePullSecrets and storageClass + +## @param global.imageRegistry Global Docker image registry +## @param global.imagePullSecrets Global Docker registry secret names as an array +## @param global.storageClass Global StorageClass for Persistent Volume(s) +## +global: + imageRegistry: "" + ## E.g. + ## imagePullSecrets: + ## - myRegistryKeySecretName + ## + imagePullSecrets: [] + storageClass: "" + +## @section Common parameters + +## @param kubeVersion Force target Kubernetes version (using Helm capabilities if not set) +## +kubeVersion: "" +## @param nameOverride String to partially override common.names.fullname template (will maintain the release name) +## +nameOverride: "" +## @param fullnameOverride String to fully override common.names.fullname template +## +fullnameOverride: "" +## @param namespaceOverride String to fully override common.names.namespace +## +namespaceOverride: "" +## @param clusterDomain Cluster domain +## +clusterDomain: cluster.local +## @param commonAnnotations Common annotations to add to all MySQL resources (sub-charts are not considered). Evaluated as a template +## +commonAnnotations: {} +## @param commonLabels Common labels to add to all MySQL resources (sub-charts are not considered). Evaluated as a template +## +commonLabels: {} +## @param extraDeploy Array with extra yaml to deploy with the chart. Evaluated as a template +## +extraDeploy: [] + +## Enable diagnostic mode in the deployment +## +diagnosticMode: + ## @param diagnosticMode.enabled Enable diagnostic mode (all probes will be disabled and the command will be overridden) + ## + enabled: false + ## @param diagnosticMode.command Command to override all containers in the deployment + ## + command: + - sleep + ## @param diagnosticMode.args Args to override all containers in the deployment + ## + args: + - infinity + +## @section MySQL common parameters + +## Bitnami MySQL image +## ref: https://hub.docker.com/r/bitnami/mysql/tags/ +## @param image.registry MySQL image registry +## @param image.repository MySQL image repository +## @param image.tag MySQL image tag (immutable tags are recommended) +## @param image.digest MySQL image digest in the way sha256:aa.... 
Please note this parameter, if set, will override the tag +## @param image.pullPolicy MySQL image pull policy +## @param image.pullSecrets Specify docker-registry secret names as an array +## @param image.debug Specify if debug logs should be enabled +## +image: + registry: docker.io + repository: bitnami/mysql + tag: 8.0.31-debian-11-r10 + digest: "" + ## Specify an imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: https://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets (secrets must be manually created in the namespace) + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## Example: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## Set to true if you would like to see extra information on logs + ## It turns on BASH and/or NAMI debugging in the image + ## + debug: false +## @param architecture MySQL architecture (`standalone` or `replication`) +## +architecture: standalone +## MySQL Authentication parameters +## +auth: + ## @param auth.rootPassword Password for the `root` user. Ignored if existing secret is provided + ## ref: https://github.com/bitnami/containers/tree/main/bitnami/mysql#setting-the-root-password-on-first-run + ## + rootPassword: "" + ## @param auth.createDatabase Whether to create the .Values.auth.database or not + ## ref: https://github.com/bitnami/containers/tree/main/bitnami/mysql#creating-a-database-on-first-run + ## + createDatabase: true + ## @param auth.database Name for a custom database to create + ## ref: https://github.com/bitnami/containers/tree/main/bitnami/mysql#creating-a-database-on-first-run + ## + database: "my_database" + ## @param auth.username Name for a custom user to create + ## ref: https://github.com/bitnami/containers/tree/main/bitnami/mysql#creating-a-database-user-on-first-run + ## + username: "" + ## @param auth.password Password for the new user. Ignored if existing secret is provided + ## + password: "" + ## @param auth.replicationUser MySQL replication user + ## ref: https://github.com/bitnami/containers/tree/main/bitnami/mysql#setting-up-a-replication-cluster + ## + replicationUser: replicator + ## @param auth.replicationPassword MySQL replication user password. Ignored if existing secret is provided + ## + replicationPassword: "" + ## @param auth.existingSecret Use existing secret for password details. The secret has to contain the keys `mysql-root-password`, `mysql-replication-password` and `mysql-password` + ## NOTE: When it's set the auth.rootPassword, auth.password, auth.replicationPassword are ignored. + ## + existingSecret: "" + ## @param auth.usePasswordFiles Mount credentials as files instead of using an environment variable + ## + usePasswordFiles: false + ## @param auth.customPasswordFiles Use custom password files when `auth.usePasswordFiles` is set to `true`. Define path for keys `root` and `user`, also define `replicator` if `architecture` is set to `replication` + ## Example: + ## customPasswordFiles: + ## root: /vault/secrets/mysql-root + ## user: /vault/secrets/mysql-user + ## replicator: /vault/secrets/mysql-replicator + ## + customPasswordFiles: {} +## @param initdbScripts Dictionary of initdb scripts +## Specify dictionary of scripts to be run at first boot +## Example: +## initdbScripts: +## my_init_script.sh: | +## #!/bin/bash +## echo "Do something."
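+## Another example (illustrative, not part of the upstream defaults): the Bitnami MySQL image also
+## executes .sql (and .sql.gz) scripts placed here on first boot, e.g.
+## create_tables.sql: |
+##   CREATE TABLE IF NOT EXISTS my_database.example (id INT PRIMARY KEY);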
+## +initdbScripts: {} +## @param initdbScriptsConfigMap ConfigMap with the initdb scripts (Note: Overrides `initdbScripts`) +## +initdbScriptsConfigMap: "" + +## @section MySQL Primary parameters + +primary: + ## @param primary.name Name of the primary database (eg primary, master, leader, ...) + ## + name: primary + ## @param primary.command Override default container command on MySQL Primary container(s) (useful when using custom images) + ## + command: [] + ## @param primary.args Override default container args on MySQL Primary container(s) (useful when using custom images) + ## + args: [] + ## @param primary.lifecycleHooks for the MySQL Primary container(s) to automate configuration before or after startup + ## + lifecycleHooks: {} + ## @param primary.hostAliases Deployment pod host aliases + ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ + ## + hostAliases: [] + ## @param primary.configuration [string] Configure MySQL Primary with a custom my.cnf file + ## ref: https://mysql.com/kb/en/mysql/configuring-mysql-with-mycnf/#example-of-configuration-file + ## + configuration: |- + [mysqld] + default_authentication_plugin=mysql_native_password + skip-name-resolve + explicit_defaults_for_timestamp + basedir=/opt/bitnami/mysql + plugin_dir=/opt/bitnami/mysql/lib/plugin + port=3306 + socket=/opt/bitnami/mysql/tmp/mysql.sock + datadir=/bitnami/mysql/data + tmpdir=/opt/bitnami/mysql/tmp + max_allowed_packet=16M + bind-address=* + pid-file=/opt/bitnami/mysql/tmp/mysqld.pid + log-error=/opt/bitnami/mysql/logs/mysqld.log + character-set-server=UTF8 + collation-server=utf8_general_ci + slow_query_log=0 + slow_query_log_file=/opt/bitnami/mysql/logs/mysqld.log + long_query_time=10.0 + + [client] + port=3306 + socket=/opt/bitnami/mysql/tmp/mysql.sock + default-character-set=UTF8 + plugin_dir=/opt/bitnami/mysql/lib/plugin + + [manager] + port=3306 + socket=/opt/bitnami/mysql/tmp/mysql.sock + pid-file=/opt/bitnami/mysql/tmp/mysqld.pid + ## @param primary.existingConfigmap Name of existing ConfigMap with MySQL Primary configuration. + ## NOTE: When it's set the 'configuration' parameter is ignored + ## + existingConfigmap: "" + ## @param primary.updateStrategy.type Update strategy type for the MySQL primary statefulset + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies + ## + updateStrategy: + type: RollingUpdate + ## @param primary.podAnnotations Additional pod annotations for MySQL primary pods + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + ## + podAnnotations: {} + ## @param primary.podAffinityPreset MySQL primary pod affinity preset. Ignored if `primary.affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAffinityPreset: "" + ## @param primary.podAntiAffinityPreset MySQL primary pod anti-affinity preset. Ignored if `primary.affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAntiAffinityPreset: soft + ## MySQL Primary node affinity preset + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity + ## + nodeAffinityPreset: + ## @param primary.nodeAffinityPreset.type MySQL primary node affinity preset type. Ignored if `primary.affinity` is set. 
Allowed values: `soft` or `hard` + ## + type: "" + ## @param primary.nodeAffinityPreset.key MySQL primary node label key to match Ignored if `primary.affinity` is set. + ## E.g. + ## key: "kubernetes.io/e2e-az-name" + ## + key: "" + ## @param primary.nodeAffinityPreset.values MySQL primary node label values to match. Ignored if `primary.affinity` is set. + ## E.g. + ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: [] + ## @param primary.affinity Affinity for MySQL primary pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## Note: podAffinityPreset, podAntiAffinityPreset, and nodeAffinityPreset will be ignored when it's set + ## + affinity: {} + ## @param primary.nodeSelector Node labels for MySQL primary pods assignment + ## ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + ## @param primary.tolerations Tolerations for MySQL primary pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + ## @param primary.priorityClassName MySQL primary pods' priorityClassName + ## + priorityClassName: "" + ## @param primary.runtimeClassName MySQL primary pods' runtimeClassName + ## + runtimeClassName: "" + ## @param primary.schedulerName Name of the k8s scheduler (other than default) + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + schedulerName: "" + ## @param primary.terminationGracePeriodSeconds In seconds, time the given to the MySQL primary pod needs to terminate gracefully + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods + ## + terminationGracePeriodSeconds: "" + ## @param primary.topologySpreadConstraints Topology Spread Constraints for pod assignment + ## https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ + ## The value is evaluated as a template + ## + topologySpreadConstraints: [] + ## @param primary.podManagementPolicy podManagementPolicy to manage scaling operation of MySQL primary pods + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#pod-management-policies + ## + podManagementPolicy: "" + ## MySQL primary Pod security context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod + ## @param primary.podSecurityContext.enabled Enable security context for MySQL primary pods + ## @param primary.podSecurityContext.fsGroup Group ID for the mounted volumes' filesystem + ## + podSecurityContext: + enabled: true + fsGroup: 1001 + ## MySQL primary container security context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container + ## @param primary.containerSecurityContext.enabled MySQL primary container securityContext + ## @param primary.containerSecurityContext.runAsUser User ID for the MySQL primary container + ## @param primary.containerSecurityContext.runAsNonRoot Set MySQL primary container's Security Context runAsNonRoot + ## + containerSecurityContext: + enabled: true + runAsUser: 1001 + runAsNonRoot: true + ## MySQL primary container's resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## We usually recommend not to specify default resources and to leave this as a conscious + ## choice for the user. 
This also increases chances charts run on environments with little + ## resources, such as Minikube. If you do want to specify resources, uncomment the following + ## lines, adjust them as necessary, and remove the curly braces after 'resources:'. + ## @param primary.resources.limits The resources limits for MySQL primary containers + ## @param primary.resources.requests The requested resources for MySQL primary containers + ## + resources: + ## Example: + ## limits: + ## cpu: 250m + ## memory: 256Mi + limits: {} + ## Examples: + ## requests: + ## cpu: 250m + ## memory: 256Mi + requests: {} + ## Configure extra options for liveness probe + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes + ## @param primary.livenessProbe.enabled Enable livenessProbe + ## @param primary.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe + ## @param primary.livenessProbe.periodSeconds Period seconds for livenessProbe + ## @param primary.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe + ## @param primary.livenessProbe.failureThreshold Failure threshold for livenessProbe + ## @param primary.livenessProbe.successThreshold Success threshold for livenessProbe + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 1 + failureThreshold: 3 + successThreshold: 1 + ## Configure extra options for readiness probe + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes + ## @param primary.readinessProbe.enabled Enable readinessProbe + ## @param primary.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe + ## @param primary.readinessProbe.periodSeconds Period seconds for readinessProbe + ## @param primary.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe + ## @param primary.readinessProbe.failureThreshold Failure threshold for readinessProbe + ## @param primary.readinessProbe.successThreshold Success threshold for readinessProbe + ## + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 1 + failureThreshold: 3 + successThreshold: 1 + ## Configure extra options for startupProbe probe + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes + ## @param primary.startupProbe.enabled Enable startupProbe + ## @param primary.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe + ## @param primary.startupProbe.periodSeconds Period seconds for startupProbe + ## @param primary.startupProbe.timeoutSeconds Timeout seconds for startupProbe + ## @param primary.startupProbe.failureThreshold Failure threshold for startupProbe + ## @param primary.startupProbe.successThreshold Success threshold for startupProbe + ## + startupProbe: + enabled: true + initialDelaySeconds: 15 + periodSeconds: 10 + timeoutSeconds: 1 + failureThreshold: 10 + successThreshold: 1 + ## @param primary.customLivenessProbe Override default liveness probe for MySQL primary containers + ## + customLivenessProbe: {} + ## @param primary.customReadinessProbe Override default readiness probe for MySQL primary containers + ## + customReadinessProbe: {} + ## @param primary.customStartupProbe Override default startup probe for MySQL primary containers + ## + customStartupProbe: {} + ## @param primary.extraFlags MySQL primary additional command line flags + ## Can be used to specify command line 
flags, for example: + ## E.g. + ## extraFlags: "--max-connect-errors=1000 --max_connections=155" + ## + extraFlags: "" + ## @param primary.extraEnvVars Extra environment variables to be set on MySQL primary containers + ## E.g. + ## extraEnvVars: + ## - name: TZ + ## value: "Europe/Paris" + ## + extraEnvVars: [] + ## @param primary.extraEnvVarsCM Name of existing ConfigMap containing extra env vars for MySQL primary containers + ## + extraEnvVarsCM: "" + ## @param primary.extraEnvVarsSecret Name of existing Secret containing extra env vars for MySQL primary containers + ## + extraEnvVarsSecret: "" + ## Enable persistence using Persistent Volume Claims + ## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + ## @param primary.persistence.enabled Enable persistence on MySQL primary replicas using a `PersistentVolumeClaim`. If false, use emptyDir + ## + enabled: true + ## @param primary.persistence.existingClaim Name of an existing `PersistentVolumeClaim` for MySQL primary replicas + ## NOTE: When it's set the rest of persistence parameters are ignored + ## + existingClaim: "" + ## @param primary.persistence.subPath The name of a volume's sub path to mount for persistence + ## + subPath: "" + ## @param primary.persistence.storageClass MySQL primary persistent volume storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + storageClass: "" + ## @param primary.persistence.annotations MySQL primary persistent volume claim annotations + ## + annotations: {} + ## @param primary.persistence.accessModes MySQL primary persistent volume access Modes + ## + accessModes: + - ReadWriteOnce + ## @param primary.persistence.size MySQL primary persistent volume size + ## + size: 8Gi + ## @param primary.persistence.selector Selector to match an existing Persistent Volume + ## selector: + ## matchLabels: + ## app: my-app + ## + selector: {} + ## @param primary.extraVolumes Optionally specify extra list of additional volumes to the MySQL Primary pod(s) + ## + extraVolumes: [] + ## @param primary.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the MySQL Primary container(s) + ## + extraVolumeMounts: [] + ## @param primary.initContainers Add additional init containers for the MySQL Primary pod(s) + ## + initContainers: [] + ## @param primary.sidecars Add additional sidecar containers for the MySQL Primary pod(s) + ## + sidecars: [] + ## MySQL Primary Service parameters + ## + service: + ## @param primary.service.type MySQL Primary K8s service type + ## + type: ClusterIP + ## @param primary.service.ports.mysql MySQL Primary K8s service port + ## + ports: + mysql: 3306 + ## @param primary.service.nodePorts.mysql MySQL Primary K8s service node port + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + nodePorts: + mysql: "" + ## @param primary.service.clusterIP MySQL Primary K8s service clusterIP IP + ## e.g: + ## clusterIP: None + ## + clusterIP: "" + ## @param primary.service.loadBalancerIP MySQL Primary loadBalancerIP if service type is `LoadBalancer` + ## Set the LoadBalancer service type to internal only + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + loadBalancerIP: "" + ## @param 
primary.service.externalTrafficPolicy Enable client source IP preservation + ## ref https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Cluster + ## @param primary.service.loadBalancerSourceRanges Addresses that are allowed when MySQL Primary service is LoadBalancer + ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## E.g. + ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## @param primary.service.extraPorts Extra ports to expose (normally used with the `sidecar` value) + ## + extraPorts: [] + ## @param primary.service.annotations Additional custom annotations for MySQL primary service + ## + annotations: {} + ## @param primary.service.sessionAffinity Session Affinity for Kubernetes service, can be "None" or "ClientIP" + ## If "ClientIP", consecutive client requests will be directed to the same Pod + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + ## + sessionAffinity: None + ## @param primary.service.sessionAffinityConfig Additional settings for the sessionAffinity + ## sessionAffinityConfig: + ## clientIP: + ## timeoutSeconds: 300 + ## + sessionAffinityConfig: {} + ## Headless service properties + ## + headless: + ## @param primary.service.headless.annotations Additional custom annotations for headless MySQL primary service. + ## + annotations: {} + + ## MySQL primary Pod Disruption Budget configuration + ## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/ + ## + pdb: + ## @param primary.pdb.create Enable/disable a Pod Disruption Budget creation for MySQL primary pods + ## + create: false + ## @param primary.pdb.minAvailable Minimum number/percentage of MySQL primary pods that should remain scheduled + ## + minAvailable: 1 + ## @param primary.pdb.maxUnavailable Maximum number/percentage of MySQL primary pods that may be made unavailable + ## + maxUnavailable: "" + ## @param primary.podLabels MySQL Primary pod label. If labels are same as commonLabels , this will take precedence + ## + podLabels: {} + +## @section MySQL Secondary parameters + +secondary: + ## @param secondary.name Name of the secondary database (eg secondary, slave, ...) 
+ ## + name: secondary + ## @param secondary.replicaCount Number of MySQL secondary replicas + ## + replicaCount: 1 + ## @param secondary.hostAliases Deployment pod host aliases + ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ + ## + hostAliases: [] + ## @param secondary.command Override default container command on MySQL Secondary container(s) (useful when using custom images) + ## + command: [] + ## @param secondary.args Override default container args on MySQL Secondary container(s) (useful when using custom images) + ## + args: [] + ## @param secondary.lifecycleHooks for the MySQL Secondary container(s) to automate configuration before or after startup + ## + lifecycleHooks: {} + ## @param secondary.configuration [string] Configure MySQL Secondary with a custom my.cnf file + ## ref: https://mysql.com/kb/en/mysql/configuring-mysql-with-mycnf/#example-of-configuration-file + ## + configuration: |- + [mysqld] + default_authentication_plugin=mysql_native_password + skip-name-resolve + explicit_defaults_for_timestamp + basedir=/opt/bitnami/mysql + plugin_dir=/opt/bitnami/mysql/lib/plugin + port=3306 + socket=/opt/bitnami/mysql/tmp/mysql.sock + datadir=/bitnami/mysql/data + tmpdir=/opt/bitnami/mysql/tmp + max_allowed_packet=16M + bind-address=* + pid-file=/opt/bitnami/mysql/tmp/mysqld.pid + log-error=/opt/bitnami/mysql/logs/mysqld.log + character-set-server=UTF8 + collation-server=utf8_general_ci + slow_query_log=0 + slow_query_log_file=/opt/bitnami/mysql/logs/mysqld.log + long_query_time=10.0 + + [client] + port=3306 + socket=/opt/bitnami/mysql/tmp/mysql.sock + default-character-set=UTF8 + plugin_dir=/opt/bitnami/mysql/lib/plugin + + [manager] + port=3306 + socket=/opt/bitnami/mysql/tmp/mysql.sock + pid-file=/opt/bitnami/mysql/tmp/mysqld.pid + ## @param secondary.existingConfigmap Name of existing ConfigMap with MySQL Secondary configuration. + ## NOTE: When it's set the 'configuration' parameter is ignored + ## + existingConfigmap: "" + ## @param secondary.updateStrategy.type Update strategy type for the MySQL secondary statefulset + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies + ## + updateStrategy: + type: RollingUpdate + ## @param secondary.podAnnotations Additional pod annotations for MySQL secondary pods + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + ## + podAnnotations: {} + ## @param secondary.podAffinityPreset MySQL secondary pod affinity preset. Ignored if `secondary.affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAffinityPreset: "" + ## @param secondary.podAntiAffinityPreset MySQL secondary pod anti-affinity preset. Ignored if `secondary.affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## Allowed values: soft, hard + ## + podAntiAffinityPreset: soft + ## MySQL Secondary node affinity preset + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity + ## + nodeAffinityPreset: + ## @param secondary.nodeAffinityPreset.type MySQL secondary node affinity preset type. Ignored if `secondary.affinity` is set. 
Allowed values: `soft` or `hard` + ## + type: "" + ## @param secondary.nodeAffinityPreset.key MySQL secondary node label key to match Ignored if `secondary.affinity` is set. + ## E.g. + ## key: "kubernetes.io/e2e-az-name" + ## + key: "" + ## @param secondary.nodeAffinityPreset.values MySQL secondary node label values to match. Ignored if `secondary.affinity` is set. + ## E.g. + ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: [] + ## @param secondary.affinity Affinity for MySQL secondary pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## Note: podAffinityPreset, podAntiAffinityPreset, and nodeAffinityPreset will be ignored when it's set + ## + affinity: {} + ## @param secondary.nodeSelector Node labels for MySQL secondary pods assignment + ## ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + ## @param secondary.tolerations Tolerations for MySQL secondary pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + ## @param secondary.priorityClassName MySQL secondary pods' priorityClassName + ## + priorityClassName: "" + ## @param secondary.runtimeClassName MySQL secondary pods' runtimeClassName + ## + runtimeClassName: "" + ## @param secondary.schedulerName Name of the k8s scheduler (other than default) + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + schedulerName: "" + ## @param secondary.terminationGracePeriodSeconds In seconds, time the given to the MySQL secondary pod needs to terminate gracefully + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods + ## + terminationGracePeriodSeconds: "" + ## @param secondary.topologySpreadConstraints Topology Spread Constraints for pod assignment + ## https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ + ## The value is evaluated as a template + ## + topologySpreadConstraints: [] + ## @param secondary.podManagementPolicy podManagementPolicy to manage scaling operation of MySQL secondary pods + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#pod-management-policies + ## + podManagementPolicy: "" + ## MySQL secondary Pod security context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod + ## @param secondary.podSecurityContext.enabled Enable security context for MySQL secondary pods + ## @param secondary.podSecurityContext.fsGroup Group ID for the mounted volumes' filesystem + ## + podSecurityContext: + enabled: true + fsGroup: 1001 + ## MySQL secondary container security context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container + ## @param secondary.containerSecurityContext.enabled MySQL secondary container securityContext + ## @param secondary.containerSecurityContext.runAsUser User ID for the MySQL secondary container + ## @param secondary.containerSecurityContext.runAsNonRoot Set MySQL secondary container's Security Context runAsNonRoot + ## + containerSecurityContext: + enabled: true + runAsUser: 1001 + runAsNonRoot: true + ## MySQL secondary container's resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## We usually recommend not to specify default resources and to leave this as a conscious + ## choice for the user. 
This also increases chances charts run on environments with little + ## resources, such as Minikube. If you do want to specify resources, uncomment the following + ## lines, adjust them as necessary, and remove the curly braces after 'resources:'. + ## @param secondary.resources.limits The resources limits for MySQL secondary containers + ## @param secondary.resources.requests The requested resources for MySQL secondary containers + ## + resources: + ## Example: + ## limits: + ## cpu: 250m + ## memory: 256Mi + limits: {} + ## Examples: + ## requests: + ## cpu: 250m + ## memory: 256Mi + requests: {} + ## Configure extra options for liveness probe + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes + ## @param secondary.livenessProbe.enabled Enable livenessProbe + ## @param secondary.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe + ## @param secondary.livenessProbe.periodSeconds Period seconds for livenessProbe + ## @param secondary.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe + ## @param secondary.livenessProbe.failureThreshold Failure threshold for livenessProbe + ## @param secondary.livenessProbe.successThreshold Success threshold for livenessProbe + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 1 + failureThreshold: 3 + successThreshold: 1 + ## Configure extra options for readiness probe + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes + ## @param secondary.readinessProbe.enabled Enable readinessProbe + ## @param secondary.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe + ## @param secondary.readinessProbe.periodSeconds Period seconds for readinessProbe + ## @param secondary.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe + ## @param secondary.readinessProbe.failureThreshold Failure threshold for readinessProbe + ## @param secondary.readinessProbe.successThreshold Success threshold for readinessProbe + ## + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 1 + failureThreshold: 3 + successThreshold: 1 + ## Configure extra options for startupProbe probe + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes + ## @param secondary.startupProbe.enabled Enable startupProbe + ## @param secondary.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe + ## @param secondary.startupProbe.periodSeconds Period seconds for startupProbe + ## @param secondary.startupProbe.timeoutSeconds Timeout seconds for startupProbe + ## @param secondary.startupProbe.failureThreshold Failure threshold for startupProbe + ## @param secondary.startupProbe.successThreshold Success threshold for startupProbe + ## + startupProbe: + enabled: true + initialDelaySeconds: 15 + periodSeconds: 10 + timeoutSeconds: 1 + failureThreshold: 15 + successThreshold: 1 + ## @param secondary.customLivenessProbe Override default liveness probe for MySQL secondary containers + ## + customLivenessProbe: {} + ## @param secondary.customReadinessProbe Override default readiness probe for MySQL secondary containers + ## + customReadinessProbe: {} + ## @param secondary.customStartupProbe Override default startup probe for MySQL secondary containers + ## + customStartupProbe: {} + ## @param secondary.extraFlags MySQL secondary additional 
command line flags + ## Can be used to specify command line flags, for example: + ## E.g. + ## extraFlags: "--max-connect-errors=1000 --max_connections=155" + ## + extraFlags: "" + ## @param secondary.extraEnvVars An array to add extra environment variables on MySQL secondary containers + ## E.g. + ## extraEnvVars: + ## - name: TZ + ## value: "Europe/Paris" + ## + extraEnvVars: [] + ## @param secondary.extraEnvVarsCM Name of existing ConfigMap containing extra env vars for MySQL secondary containers + ## + extraEnvVarsCM: "" + ## @param secondary.extraEnvVarsSecret Name of existing Secret containing extra env vars for MySQL secondary containers + ## + extraEnvVarsSecret: "" + ## Enable persistence using Persistent Volume Claims + ## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + ## @param secondary.persistence.enabled Enable persistence on MySQL secondary replicas using a `PersistentVolumeClaim` + ## + enabled: true + ## @param secondary.persistence.existingClaim Name of an existing `PersistentVolumeClaim` for MySQL secondary replicas + ## NOTE: When it's set the rest of persistence parameters are ignored + ## + existingClaim: "" + ## @param secondary.persistence.subPath The name of a volume's sub path to mount for persistence + ## + subPath: "" + ## @param secondary.persistence.storageClass MySQL secondary persistent volume storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + storageClass: "" + ## @param secondary.persistence.annotations MySQL secondary persistent volume claim annotations + ## + annotations: {} + ## @param secondary.persistence.accessModes MySQL secondary persistent volume access Modes + ## + accessModes: + - ReadWriteOnce + ## @param secondary.persistence.size MySQL secondary persistent volume size + ## + size: 8Gi + ## @param secondary.persistence.selector Selector to match an existing Persistent Volume + ## selector: + ## matchLabels: + ## app: my-app + ## + selector: {} + ## @param secondary.extraVolumes Optionally specify extra list of additional volumes to the MySQL secondary pod(s) + ## + extraVolumes: [] + ## @param secondary.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the MySQL secondary container(s) + ## + extraVolumeMounts: [] + ## @param secondary.initContainers Add additional init containers for the MySQL secondary pod(s) + ## + initContainers: [] + ## @param secondary.sidecars Add additional sidecar containers for the MySQL secondary pod(s) + ## + sidecars: [] + ## MySQL Secondary Service parameters + ## + service: + ## @param secondary.service.type MySQL secondary Kubernetes service type + ## + type: ClusterIP + ## @param secondary.service.ports.mysql MySQL secondary Kubernetes service port + ## + ports: + mysql: 3306 + ## @param secondary.service.nodePorts.mysql MySQL secondary Kubernetes service node port + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + nodePorts: + mysql: "" + ## @param secondary.service.clusterIP MySQL secondary Kubernetes service clusterIP IP + ## e.g: + ## clusterIP: None + ## + clusterIP: "" + ## @param secondary.service.loadBalancerIP MySQL secondary loadBalancerIP if service type is `LoadBalancer` + ## Set the LoadBalancer service type to internal only + ## ref: 
https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + loadBalancerIP: "" + ## @param secondary.service.externalTrafficPolicy Enable client source IP preservation + ## ref https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Cluster + ## @param secondary.service.loadBalancerSourceRanges Addresses that are allowed when MySQL secondary service is LoadBalancer + ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## E.g. + ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## @param secondary.service.extraPorts Extra ports to expose (normally used with the `sidecar` value) + ## + extraPorts: [] + ## @param secondary.service.annotations Additional custom annotations for MySQL secondary service + ## + annotations: {} + ## @param secondary.service.sessionAffinity Session Affinity for Kubernetes service, can be "None" or "ClientIP" + ## If "ClientIP", consecutive client requests will be directed to the same Pod + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + ## + sessionAffinity: None + ## @param secondary.service.sessionAffinityConfig Additional settings for the sessionAffinity + ## sessionAffinityConfig: + ## clientIP: + ## timeoutSeconds: 300 + ## + sessionAffinityConfig: {} + ## Headless service properties + ## + headless: + ## @param secondary.service.headless.annotations Additional custom annotations for headless MySQL secondary service. + ## + annotations: {} + + ## MySQL secondary Pod Disruption Budget configuration + ## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/ + ## + pdb: + ## @param secondary.pdb.create Enable/disable a Pod Disruption Budget creation for MySQL secondary pods + ## + create: false + ## @param secondary.pdb.minAvailable Minimum number/percentage of MySQL secondary pods that should remain scheduled + ## + minAvailable: 1 + ## @param secondary.pdb.maxUnavailable Maximum number/percentage of MySQL secondary pods that may be made unavailable + ## + maxUnavailable: "" + ## @param secondary.podLabels Additional pod labels for MySQL secondary pods + ## + podLabels: {} + +## @section RBAC parameters + +## MySQL pods ServiceAccount +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ +## +serviceAccount: + ## @param serviceAccount.create Enable the creation of a ServiceAccount for MySQL pods + ## + create: true + ## @param serviceAccount.name Name of the created ServiceAccount + ## If not set and create is true, a name is generated using the mysql.fullname template + ## + name: "" + ## @param serviceAccount.annotations Annotations for MySQL Service Account + ## + annotations: {} + ## @param serviceAccount.automountServiceAccountToken Automount service account token for the server service account + ## + automountServiceAccountToken: true + +## Role Based Access +## ref: https://kubernetes.io/docs/admin/authorization/rbac/ +## +rbac: + ## @param rbac.create Whether to create & use RBAC resources or not + ## + create: false + ## @param rbac.rules Custom RBAC rules to set + ## e.g: + ## rules: + ## - apiGroups: + ## - "" + ## resources: + ## - pods + ## verbs: + ## - get + ## - list + ## + rules: [] + +## @section Network Policy + +## MySQL Nework Policy configuration +## +networkPolicy: + ## 
@param networkPolicy.enabled Enable creation of NetworkPolicy resources + ## + enabled: false + ## @param networkPolicy.allowExternal The Policy model to apply. + ## When set to false, only pods with the correct + ## client label will have network access to the port MySQL is listening + ## on. When true, MySQL will accept connections from any source + ## (with the correct destination port). + ## + allowExternal: true + ## @param networkPolicy.explicitNamespacesSelector A Kubernetes LabelSelector to explicitly select namespaces from which ingress traffic could be allowed to MySQL + ## If explicitNamespacesSelector is missing or set to {}, only client Pods that are in the networkPolicy's namespace + ## and that match other criteria, the ones that have the good label, can reach the DB. + ## But sometimes, we want the DB to be accessible to clients from other namespaces, in this case, we can use this + ## LabelSelector to select these namespaces, note that the networkPolicy's namespace should also be explicitly added. + ## + ## Example: + ## explicitNamespacesSelector: + ## matchLabels: + ## role: frontend + ## matchExpressions: + ## - {key: role, operator: In, values: [frontend]} + ## + explicitNamespacesSelector: {} + +## @section Volume Permissions parameters + +## Init containers parameters: +## volumePermissions: Change the owner and group of the persistent volume mountpoint to runAsUser:fsGroup values from the securityContext section. +## +volumePermissions: + ## @param volumePermissions.enabled Enable init container that changes the owner and group of the persistent volume(s) mountpoint to `runAsUser:fsGroup` + ## + enabled: false + ## @param volumePermissions.image.registry Init container volume-permissions image registry + ## @param volumePermissions.image.repository Init container volume-permissions image repository + ## @param volumePermissions.image.tag Init container volume-permissions image tag (immutable tags are recommended) + ## @param volumePermissions.image.digest Init container volume-permissions image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag + ## @param volumePermissions.image.pullPolicy Init container volume-permissions image pull policy + ## @param volumePermissions.image.pullSecrets Specify docker-registry secret names as an array + ## + image: + registry: docker.io + repository: bitnami/bitnami-shell + tag: 11-debian-11-r50 + digest: "" + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## e.g: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## @param volumePermissions.resources Init container volume-permissions resources + ## + resources: {} + +## @section Metrics parameters + +## Mysqld Prometheus exporter parameters +## +metrics: + ## @param metrics.enabled Start a side-car prometheus exporter + ## + enabled: false + ## @param metrics.image.registry Exporter image registry + ## @param metrics.image.repository Exporter image repository + ## @param metrics.image.tag Exporter image tag (immutable tags are recommended) + ## @param metrics.image.digest Exporter image digest in the way sha256:aa.... 
Please note this parameter, if set, will override the tag + ## @param metrics.image.pullPolicy Exporter image pull policy + ## @param metrics.image.pullSecrets Specify docker-registry secret names as an array + ## + image: + registry: docker.io + repository: bitnami/mysqld-exporter + tag: 0.14.0-debian-11-r55 + digest: "" + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## e.g: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## MySQL Prometheus exporter service parameters + ## Mysqld Prometheus exporter liveness and readiness probes + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes + ## @param metrics.service.type Kubernetes service type for MySQL Prometheus Exporter + ## @param metrics.service.port MySQL Prometheus Exporter service port + ## @param metrics.service.annotations [object] Prometheus exporter service annotations + ## + service: + type: ClusterIP + port: 9104 + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "{{ .Values.metrics.service.port }}" + ## @param metrics.extraArgs.primary Extra args to be passed to mysqld_exporter on Primary pods + ## @param metrics.extraArgs.secondary Extra args to be passed to mysqld_exporter on Secondary pods + ## ref: https://github.com/prometheus/mysqld_exporter/ + ## E.g. + ## - --collect.auto_increment.columns + ## - --collect.binlog_size + ## - --collect.engine_innodb_status + ## - --collect.engine_tokudb_status + ## - --collect.global_status + ## - --collect.global_variables + ## - --collect.info_schema.clientstats + ## - --collect.info_schema.innodb_metrics + ## - --collect.info_schema.innodb_tablespaces + ## - --collect.info_schema.innodb_cmp + ## - --collect.info_schema.innodb_cmpmem + ## - --collect.info_schema.processlist + ## - --collect.info_schema.processlist.min_time + ## - --collect.info_schema.query_response_time + ## - --collect.info_schema.tables + ## - --collect.info_schema.tables.databases + ## - --collect.info_schema.tablestats + ## - --collect.info_schema.userstats + ## - --collect.perf_schema.eventsstatements + ## - --collect.perf_schema.eventsstatements.digest_text_limit + ## - --collect.perf_schema.eventsstatements.limit + ## - --collect.perf_schema.eventsstatements.timelimit + ## - --collect.perf_schema.eventswaits + ## - --collect.perf_schema.file_events + ## - --collect.perf_schema.file_instances + ## - --collect.perf_schema.indexiowaits + ## - --collect.perf_schema.tableiowaits + ## - --collect.perf_schema.tablelocks + ## - --collect.perf_schema.replication_group_member_stats + ## - --collect.slave_status + ## - --collect.slave_hosts + ## - --collect.heartbeat + ## - --collect.heartbeat.database + ## - --collect.heartbeat.table + ## + extraArgs: + primary: [] + secondary: [] + ## Mysqld Prometheus exporter resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## We usually recommend not to specify default resources and to leave this as a conscious + ## choice for the user. This also increases chances charts run on environments with little + ## resources, such as Minikube. If you do want to specify resources, uncomment the following + ## lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
+ ## @param metrics.resources.limits The resources limits for MySQL prometheus exporter containers + ## @param metrics.resources.requests The requested resources for MySQL prometheus exporter containers + ## + resources: + ## Example: + ## limits: + ## cpu: 100m + ## memory: 256Mi + limits: {} + ## Examples: + ## requests: + ## cpu: 100m + ## memory: 256Mi + requests: {} + ## Mysqld Prometheus exporter liveness probe + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes + ## @param metrics.livenessProbe.enabled Enable livenessProbe + ## @param metrics.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe + ## @param metrics.livenessProbe.periodSeconds Period seconds for livenessProbe + ## @param metrics.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe + ## @param metrics.livenessProbe.failureThreshold Failure threshold for livenessProbe + ## @param metrics.livenessProbe.successThreshold Success threshold for livenessProbe + ## + livenessProbe: + enabled: true + initialDelaySeconds: 120 + periodSeconds: 10 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 3 + ## Mysqld Prometheus exporter readiness probe + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes + ## @param metrics.readinessProbe.enabled Enable readinessProbe + ## @param metrics.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe + ## @param metrics.readinessProbe.periodSeconds Period seconds for readinessProbe + ## @param metrics.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe + ## @param metrics.readinessProbe.failureThreshold Failure threshold for readinessProbe + ## @param metrics.readinessProbe.successThreshold Success threshold for readinessProbe + ## + readinessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 3 + ## Prometheus Service Monitor + ## ref: https://github.com/coreos/prometheus-operator + ## + serviceMonitor: + ## @param metrics.serviceMonitor.enabled Create ServiceMonitor Resource for scraping metrics using PrometheusOperator + ## + enabled: false + ## @param metrics.serviceMonitor.namespace Specify the namespace in which the serviceMonitor resource will be created + ## + namespace: "" + ## @param metrics.serviceMonitor.jobLabel The name of the label on the target service to use as the job name in prometheus. 
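+    ## e.g. (illustrative): setting jobLabel to "app.kubernetes.io/name" makes Prometheus use the value of
+    ## that label on the target Service as the job label for the scraped metrics.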
+ ## + jobLabel: "" + ## @param metrics.serviceMonitor.interval Specify the interval at which metrics should be scraped + ## + interval: 30s + ## @param metrics.serviceMonitor.scrapeTimeout Specify the timeout after which the scrape is ended + ## e.g: + ## scrapeTimeout: 30s + ## + scrapeTimeout: "" + ## @param metrics.serviceMonitor.relabelings RelabelConfigs to apply to samples before scraping + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#relabelconfig + ## + relabelings: [] + ## @param metrics.serviceMonitor.metricRelabelings MetricRelabelConfigs to apply to samples before ingestion + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#relabelconfig + ## + metricRelabelings: [] + ## @param metrics.serviceMonitor.selector ServiceMonitor selector labels + ## ref: https://github.com/bitnami/charts/tree/main/bitnami/prometheus-operator#prometheus-configuration + ## + ## selector: + ## prometheus: my-prometheus + ## + selector: {} + ## @param metrics.serviceMonitor.honorLabels Specify honorLabels parameter to add the scrape endpoint + ## + honorLabels: false + ## @param metrics.serviceMonitor.labels Used to pass Labels that are used by the Prometheus installed in your cluster to select Service Monitors to work with + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#prometheusspec + ## + labels: {} + ## @param metrics.serviceMonitor.annotations ServiceMonitor annotations + ## + annotations: {} + + ## Prometheus Operator prometheusRule configuration + ## + prometheusRule: + ## @param metrics.prometheusRule.enabled Creates a Prometheus Operator prometheusRule (also requires `metrics.enabled` to be `true` and `metrics.prometheusRule.rules`) + ## + enabled: false + ## @param metrics.prometheusRule.namespace Namespace for the prometheusRule Resource (defaults to the Release Namespace) + ## + namespace: "" + ## @param metrics.prometheusRule.additionalLabels Additional labels that can be used so prometheusRule will be discovered by Prometheus + ## + additionalLabels: {} + ## @param metrics.prometheusRule.rules Prometheus Rule definitions + ## - alert: Mysql-Down + ## expr: absent(up{job="mysql"} == 1) + ## for: 5m + ## labels: + ## severity: warning + ## service: mariadb + ## annotations: + ## message: 'MariaDB instance {{`{{`}} $labels.instance {{`}}`}} is down' + ## summary: MariaDB instance is down + ## + rules: [] diff --git a/charts/modules/postgresql/.helmignore b/charts/modules/postgresql/.helmignore new file mode 100644 index 0000000..f0c1319 --- /dev/null +++ b/charts/modules/postgresql/.helmignore @@ -0,0 +1,21 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/charts/modules/postgresql/Chart.lock b/charts/modules/postgresql/Chart.lock new file mode 100644 index 0000000..ab8d79c --- /dev/null +++ b/charts/modules/postgresql/Chart.lock @@ -0,0 +1,6 @@ +dependencies: +- name: common + repository: https://charts.bitnami.com/bitnami + version: 2.1.2 +digest: sha256:1c365a4551a2f4098e9584dc176b289c10437c679c7c3e2ec6153cabf863e1a4 +generated: "2022-11-01T11:07:06.309318341Z" diff --git a/charts/modules/postgresql/Chart.yaml b/charts/modules/postgresql/Chart.yaml new file mode 100644 index 0000000..310ff0a --- /dev/null +++ b/charts/modules/postgresql/Chart.yaml @@ -0,0 +1,30 @@ +annotations: + category: Database +apiVersion: v2 +appVersion: 15.1.0 +dependencies: +- name: common + repository: https://charts.bitnami.com/bitnami + tags: + - bitnami-common + version: 2.x.x +description: PostgreSQL (Postgres) is an open source object-relational database known + for reliability and data integrity. ACID-compliant, it supports foreign keys, joins, + views, triggers and stored procedures. +home: https://github.com/bitnami/charts/tree/main/bitnami/postgresql +icon: https://bitnami.com/assets/stacks/postgresql/img/postgresql-stack-220x234.png +keywords: +- postgresql +- postgres +- database +- sql +- replication +- cluster +maintainers: +- name: Bitnami + url: https://github.com/bitnami/charts +name: postgresql +sources: +- https://github.com/bitnami/containers/tree/main/bitnami/postgresql +- https://www.postgresql.org/ +version: 12.1.2 diff --git a/charts/modules/postgresql/README.md b/charts/modules/postgresql/README.md new file mode 100644 index 0000000..6007c0f --- /dev/null +++ b/charts/modules/postgresql/README.md @@ -0,0 +1,693 @@ + + +# PostgreSQL packaged by Bitnami + +PostgreSQL (Postgres) is an open source object-relational database known for reliability and data integrity. ACID-compliant, it supports foreign keys, joins, views, triggers and stored procedures. + +[Overview of PostgreSQL](http://www.postgresql.org) + +Trademarks: This software listing is packaged by Bitnami. The respective trademarks mentioned in the offering are owned by the respective companies, and use of them does not imply any affiliation or endorsement. + +## TL;DR + +```bash +helm repo add my-repo https://charts.bitnami.com/bitnami +helm install my-release my-repo/postgresql +``` + +## Introduction + +This chart bootstraps a [PostgreSQL](https://github.com/bitnami/containers/tree/main/bitnami/postgresql) deployment on a [Kubernetes](https://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +For HA, please see [this repo](https://github.com/bitnami/charts/tree/main/bitnami/postgresql-ha) + +Bitnami charts can be used with [Kubeapps](https://kubeapps.dev/) for deployment and management of Helm Charts in clusters. + +## Prerequisites + +- Kubernetes 1.19+ +- Helm 3.2.0+ +- PV provisioner support in the underlying infrastructure + +## Installing the Chart + +To install the chart with the release name `my-release`: + +```bash +helm install my-release my-repo/postgresql +``` + +The command deploys PostgreSQL on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation. 
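+
+For example, you can set the password for the "postgres" admin user and create a custom database at install time (illustrative values; see the [Parameters](#parameters) section for the available options):
+
+```bash
+helm install my-release my-repo/postgresql \
+  --set auth.postgresPassword=secretpassword \
+  --set auth.database=my_database
+```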
+ +> **Tip**: List all releases using `helm list` + +## Uninstalling the Chart + +To uninstall/delete the `my-release` deployment: + +```console +helm delete my-release +``` + +The command removes all the Kubernetes components but PVC's associated with the chart and deletes the release. + +To delete the PVC's associated with `my-release`: + +```bash +kubectl delete pvc -l release=my-release +``` + +> **Note**: Deleting the PVC's will delete postgresql data as well. Please be cautious before doing it. + +## Parameters + +### Global parameters + +| Name | Description | Value | +| ---------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----- | +| `global.imageRegistry` | Global Docker image registry | `""` | +| `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]` | +| `global.storageClass` | Global StorageClass for Persistent Volume(s) | `""` | +| `global.postgresql.auth.postgresPassword` | Password for the "postgres" admin user (overrides `auth.postgresPassword`) | `""` | +| `global.postgresql.auth.username` | Name for a custom user to create (overrides `auth.username`) | `""` | +| `global.postgresql.auth.password` | Password for the custom user to create (overrides `auth.password`) | `""` | +| `global.postgresql.auth.database` | Name for a custom database to create (overrides `auth.database`) | `""` | +| `global.postgresql.auth.existingSecret` | Name of existing secret to use for PostgreSQL credentials (overrides `auth.existingSecret`). | `""` | +| `global.postgresql.auth.secretKeys.adminPasswordKey` | Name of key in existing secret to use for PostgreSQL credentials (overrides `auth.secretKeys.adminPasswordKey`). Only used when `global.postgresql.auth.existingSecret` is set. | `""` | +| `global.postgresql.auth.secretKeys.userPasswordKey` | Name of key in existing secret to use for PostgreSQL credentials (overrides `auth.secretKeys.userPasswordKey`). Only used when `global.postgresql.auth.existingSecret` is set. | `""` | +| `global.postgresql.auth.secretKeys.replicationPasswordKey` | Name of key in existing secret to use for PostgreSQL credentials (overrides `auth.secretKeys.replicationPasswordKey`). Only used when `global.postgresql.auth.existingSecret` is set. 
| `""` | +| `global.postgresql.service.ports.postgresql` | PostgreSQL service port (overrides `service.ports.postgresql`) | `""` | + + +### Common parameters + +| Name | Description | Value | +| ------------------------ | -------------------------------------------------------------------------------------------- | --------------- | +| `kubeVersion` | Override Kubernetes version | `""` | +| `nameOverride` | String to partially override common.names.fullname template (will maintain the release name) | `""` | +| `fullnameOverride` | String to fully override common.names.fullname template | `""` | +| `clusterDomain` | Kubernetes Cluster Domain | `cluster.local` | +| `extraDeploy` | Array of extra objects to deploy with the release (evaluated as a template) | `[]` | +| `commonLabels` | Add labels to all the deployed resources | `{}` | +| `commonAnnotations` | Add annotations to all the deployed resources | `{}` | +| `diagnosticMode.enabled` | Enable diagnostic mode (all probes will be disabled and the command will be overridden) | `false` | +| `diagnosticMode.command` | Command to override all containers in the statefulset | `["sleep"]` | +| `diagnosticMode.args` | Args to override all containers in the statefulset | `["infinity"]` | + + +### PostgreSQL common parameters + +| Name | Description | Value | +| ---------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------- | +| `image.registry` | PostgreSQL image registry | `docker.io` | +| `image.repository` | PostgreSQL image repository | `bitnami/postgresql` | +| `image.tag` | PostgreSQL image tag (immutable tags are recommended) | `15.1.0-debian-11-r0` | +| `image.digest` | PostgreSQL image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` | +| `image.pullPolicy` | PostgreSQL image pull policy | `IfNotPresent` | +| `image.pullSecrets` | Specify image pull secrets | `[]` | +| `image.debug` | Specify if debug values should be set | `false` | +| `auth.enablePostgresUser` | Assign a password to the "postgres" admin user. Otherwise, remote access will be blocked for this user | `true` | +| `auth.postgresPassword` | Password for the "postgres" admin user. Ignored if `auth.existingSecret` with key `postgres-password` is provided | `""` | +| `auth.username` | Name for a custom user to create | `""` | +| `auth.password` | Password for the custom user to create. Ignored if `auth.existingSecret` with key `password` is provided | `""` | +| `auth.database` | Name for a custom database to create | `""` | +| `auth.replicationUsername` | Name of the replication user | `repl_user` | +| `auth.replicationPassword` | Password for the replication user. Ignored if `auth.existingSecret` with key `replication-password` is provided | `""` | +| `auth.existingSecret` | Name of existing secret to use for PostgreSQL credentials. `auth.postgresPassword`, `auth.password`, and `auth.replicationPassword` will be ignored and picked up from this secret. The secret might also contains the key `ldap-password` if LDAP is enabled. `ldap.bind_password` will be ignored and picked from this secret in this case. 
| `""` | +| `auth.secretKeys.adminPasswordKey` | Name of key in existing secret to use for PostgreSQL credentials. Only used when `auth.existingSecret` is set. | `postgres-password` | +| `auth.secretKeys.userPasswordKey` | Name of key in existing secret to use for PostgreSQL credentials. Only used when `auth.existingSecret` is set. | `password` | +| `auth.secretKeys.replicationPasswordKey` | Name of key in existing secret to use for PostgreSQL credentials. Only used when `auth.existingSecret` is set. | `replication-password` | +| `auth.usePasswordFiles` | Mount credentials as a files instead of using an environment variable | `false` | +| `architecture` | PostgreSQL architecture (`standalone` or `replication`) | `standalone` | +| `replication.synchronousCommit` | Set synchronous commit mode. Allowed values: `on`, `remote_apply`, `remote_write`, `local` and `off` | `off` | +| `replication.numSynchronousReplicas` | Number of replicas that will have synchronous replication. Note: Cannot be greater than `readReplicas.replicaCount`. | `0` | +| `replication.applicationName` | Cluster application name. Useful for advanced replication settings | `my_application` | +| `containerPorts.postgresql` | PostgreSQL container port | `5432` | +| `audit.logHostname` | Log client hostnames | `false` | +| `audit.logConnections` | Add client log-in operations to the log file | `false` | +| `audit.logDisconnections` | Add client log-outs operations to the log file | `false` | +| `audit.pgAuditLog` | Add operations to log using the pgAudit extension | `""` | +| `audit.pgAuditLogCatalog` | Log catalog using pgAudit | `off` | +| `audit.clientMinMessages` | Message log level to share with the user | `error` | +| `audit.logLinePrefix` | Template for log line prefix (default if not set) | `""` | +| `audit.logTimezone` | Timezone for the log timestamps | `""` | +| `ldap.enabled` | Enable LDAP support | `false` | +| `ldap.server` | IP address or name of the LDAP server. | `""` | +| `ldap.port` | Port number on the LDAP server to connect to | `""` | +| `ldap.prefix` | String to prepend to the user name when forming the DN to bind | `""` | +| `ldap.suffix` | String to append to the user name when forming the DN to bind | `""` | +| `ldap.basedn` | Root DN to begin the search for the user in | `""` | +| `ldap.binddn` | DN of user to bind to LDAP | `""` | +| `ldap.bindpw` | Password for the user to bind to LDAP | `""` | +| `ldap.searchAttribute` | Attribute to match against the user name in the search | `""` | +| `ldap.searchFilter` | The search filter to use when doing search+bind authentication | `""` | +| `ldap.scheme` | Set to `ldaps` to use LDAPS | `""` | +| `ldap.tls.enabled` | Se to true to enable TLS encryption | `false` | +| `ldap.uri` | LDAP URL beginning in the form `ldap[s]://host[:port]/basedn`. If provided, all the other LDAP parameters will be ignored. 
| `""` | +| `postgresqlDataDir` | PostgreSQL data dir folder | `/bitnami/postgresql/data` | +| `postgresqlSharedPreloadLibraries` | Shared preload libraries (comma-separated list) | `pgaudit` | +| `shmVolume.enabled` | Enable emptyDir volume for /dev/shm for PostgreSQL pod(s) | `true` | +| `shmVolume.sizeLimit` | Set this to enable a size limit on the shm tmpfs | `""` | +| `tls.enabled` | Enable TLS traffic support | `false` | +| `tls.autoGenerated` | Generate automatically self-signed TLS certificates | `false` | +| `tls.preferServerCiphers` | Whether to use the server's TLS cipher preferences rather than the client's | `true` | +| `tls.certificatesSecret` | Name of an existing secret that contains the certificates | `""` | +| `tls.certFilename` | Certificate filename | `""` | +| `tls.certKeyFilename` | Certificate key filename | `""` | +| `tls.certCAFilename` | CA Certificate filename | `""` | +| `tls.crlFilename` | File containing a Certificate Revocation List | `""` | + + +### PostgreSQL Primary parameters + +| Name | Description | Value | +| -------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------ | --------------------- | +| `primary.name` | Name of the primary database (eg primary, master, leader, ...) | `primary` | +| `primary.configuration` | PostgreSQL Primary main configuration to be injected as ConfigMap | `""` | +| `primary.pgHbaConfiguration` | PostgreSQL Primary client authentication configuration | `""` | +| `primary.existingConfigmap` | Name of an existing ConfigMap with PostgreSQL Primary configuration | `""` | +| `primary.extendedConfiguration` | Extended PostgreSQL Primary configuration (appended to main or default configuration) | `""` | +| `primary.existingExtendedConfigmap` | Name of an existing ConfigMap with PostgreSQL Primary extended configuration | `""` | +| `primary.initdb.args` | PostgreSQL initdb extra arguments | `""` | +| `primary.initdb.postgresqlWalDir` | Specify a custom location for the PostgreSQL transaction log | `""` | +| `primary.initdb.scripts` | Dictionary of initdb scripts | `{}` | +| `primary.initdb.scriptsConfigMap` | ConfigMap with scripts to be run at first boot | `""` | +| `primary.initdb.scriptsSecret` | Secret with scripts to be run at first boot (in case it contains sensitive information) | `""` | +| `primary.initdb.user` | Specify the PostgreSQL username to execute the initdb scripts | `""` | +| `primary.initdb.password` | Specify the PostgreSQL password to execute the initdb scripts | `""` | +| `primary.standby.enabled` | Whether to enable current cluster's primary as standby server of another cluster or not | `false` | +| `primary.standby.primaryHost` | The Host of replication primary in the other cluster | `""` | +| `primary.standby.primaryPort` | The Port of replication primary in the other cluster | `""` | +| `primary.extraEnvVars` | Array with extra environment variables to add to PostgreSQL Primary nodes | `[]` | +| `primary.extraEnvVarsCM` | Name of existing ConfigMap containing extra env vars for PostgreSQL Primary nodes | `""` | +| `primary.extraEnvVarsSecret` | Name of existing Secret containing extra env vars for PostgreSQL Primary nodes | `""` | +| `primary.command` | Override default container command (useful when using custom images) | `[]` | +| `primary.args` | Override default container args (useful when using custom images) | `[]` | +| `primary.livenessProbe.enabled` | Enable livenessProbe on PostgreSQL Primary 
containers | `true` | +| `primary.livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `30` | +| `primary.livenessProbe.periodSeconds` | Period seconds for livenessProbe | `10` | +| `primary.livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `5` | +| `primary.livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `6` | +| `primary.livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` | +| `primary.readinessProbe.enabled` | Enable readinessProbe on PostgreSQL Primary containers | `true` | +| `primary.readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `5` | +| `primary.readinessProbe.periodSeconds` | Period seconds for readinessProbe | `10` | +| `primary.readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `5` | +| `primary.readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `6` | +| `primary.readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` | +| `primary.startupProbe.enabled` | Enable startupProbe on PostgreSQL Primary containers | `false` | +| `primary.startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `30` | +| `primary.startupProbe.periodSeconds` | Period seconds for startupProbe | `10` | +| `primary.startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `1` | +| `primary.startupProbe.failureThreshold` | Failure threshold for startupProbe | `15` | +| `primary.startupProbe.successThreshold` | Success threshold for startupProbe | `1` | +| `primary.customLivenessProbe` | Custom livenessProbe that overrides the default one | `{}` | +| `primary.customReadinessProbe` | Custom readinessProbe that overrides the default one | `{}` | +| `primary.customStartupProbe` | Custom startupProbe that overrides the default one | `{}` | +| `primary.lifecycleHooks` | for the PostgreSQL Primary container to automate configuration before or after startup | `{}` | +| `primary.resources.limits` | The resources limits for the PostgreSQL Primary containers | `{}` | +| `primary.resources.requests.memory` | The requested memory for the PostgreSQL Primary containers | `256Mi` | +| `primary.resources.requests.cpu` | The requested cpu for the PostgreSQL Primary containers | `250m` | +| `primary.podSecurityContext.enabled` | Enable security context | `true` | +| `primary.podSecurityContext.fsGroup` | Group ID for the pod | `1001` | +| `primary.containerSecurityContext.enabled` | Enable container security context | `true` | +| `primary.containerSecurityContext.runAsUser` | User ID for the container | `1001` | +| `primary.hostAliases` | PostgreSQL primary pods host aliases | `[]` | +| `primary.hostNetwork` | Specify if host network should be enabled for PostgreSQL pod (postgresql primary) | `false` | +| `primary.hostIPC` | Specify if host IPC should be enabled for PostgreSQL pod (postgresql primary) | `false` | +| `primary.labels` | Map of labels to add to the statefulset (postgresql primary) | `{}` | +| `primary.annotations` | Annotations for PostgreSQL primary pods | `{}` | +| `primary.podLabels` | Map of labels to add to the pods (postgresql primary) | `{}` | +| `primary.podAnnotations` | Map of annotations to add to the pods (postgresql primary) | `{}` | +| `primary.podAffinityPreset` | PostgreSQL primary pod affinity preset. Ignored if `primary.affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `primary.podAntiAffinityPreset` | PostgreSQL primary pod anti-affinity preset. 
Ignored if `primary.affinity` is set. Allowed values: `soft` or `hard` | `soft` | +| `primary.nodeAffinityPreset.type` | PostgreSQL primary node affinity preset type. Ignored if `primary.affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `primary.nodeAffinityPreset.key` | PostgreSQL primary node label key to match Ignored if `primary.affinity` is set. | `""` | +| `primary.nodeAffinityPreset.values` | PostgreSQL primary node label values to match. Ignored if `primary.affinity` is set. | `[]` | +| `primary.affinity` | Affinity for PostgreSQL primary pods assignment | `{}` | +| `primary.nodeSelector` | Node labels for PostgreSQL primary pods assignment | `{}` | +| `primary.tolerations` | Tolerations for PostgreSQL primary pods assignment | `[]` | +| `primary.topologySpreadConstraints` | Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template | `[]` | +| `primary.priorityClassName` | Priority Class to use for each pod (postgresql primary) | `""` | +| `primary.schedulerName` | Use an alternate scheduler, e.g. "stork". | `""` | +| `primary.terminationGracePeriodSeconds` | Seconds PostgreSQL primary pod needs to terminate gracefully | `""` | +| `primary.updateStrategy.type` | PostgreSQL Primary statefulset strategy type | `RollingUpdate` | +| `primary.updateStrategy.rollingUpdate` | PostgreSQL Primary statefulset rolling update configuration parameters | `{}` | +| `primary.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the PostgreSQL Primary container(s) | `[]` | +| `primary.extraVolumes` | Optionally specify extra list of additional volumes for the PostgreSQL Primary pod(s) | `[]` | +| `primary.sidecars` | Add additional sidecar containers to the PostgreSQL Primary pod(s) | `[]` | +| `primary.initContainers` | Add additional init containers to the PostgreSQL Primary pod(s) | `[]` | +| `primary.extraPodSpec` | Optionally specify extra PodSpec for the PostgreSQL Primary pod(s) | `{}` | +| `primary.service.type` | Kubernetes Service type | `ClusterIP` | +| `primary.service.ports.postgresql` | PostgreSQL service port | `5432` | +| `primary.service.nodePorts.postgresql` | Node port for PostgreSQL | `""` | +| `primary.service.clusterIP` | Static clusterIP or None for headless services | `""` | +| `primary.service.annotations` | Annotations for PostgreSQL primary service | `{}` | +| `primary.service.loadBalancerIP` | Load balancer IP if service type is `LoadBalancer` | `""` | +| `primary.service.externalTrafficPolicy` | Enable client source IP preservation | `Cluster` | +| `primary.service.loadBalancerSourceRanges` | Addresses that are allowed when service is LoadBalancer | `[]` | +| `primary.service.extraPorts` | Extra ports to expose in the PostgreSQL primary service | `[]` | +| `primary.service.sessionAffinity` | Session Affinity for Kubernetes service, can be "None" or "ClientIP" | `None` | +| `primary.service.sessionAffinityConfig` | Additional settings for the sessionAffinity | `{}` | +| `primary.service.headless.annotations` | Additional custom annotations for headless PostgreSQL primary service | `{}` | +| `primary.persistence.enabled` | Enable PostgreSQL Primary data persistence using PVC | `true` | +| `primary.persistence.existingClaim` | Name of an existing PVC to use | `""` | +| `primary.persistence.mountPath` | The path the volume will be mounted at | `/bitnami/postgresql` | +| `primary.persistence.subPath` | The subdirectory of the volume to mount to | `""` | +| 
`primary.persistence.storageClass` | PVC Storage Class for PostgreSQL Primary data volume | `""` | +| `primary.persistence.accessModes` | PVC Access Mode for PostgreSQL volume | `["ReadWriteOnce"]` | +| `primary.persistence.size` | PVC Storage Request for PostgreSQL volume | `8Gi` | +| `primary.persistence.annotations` | Annotations for the PVC | `{}` | +| `primary.persistence.labels` | Labels for the PVC | `{}` | +| `primary.persistence.selector` | Selector to match an existing Persistent Volume (this value is evaluated as a template) | `{}` | +| `primary.persistence.dataSource` | Custom PVC data source | `{}` | + + +### PostgreSQL read only replica parameters (only used when `architecture` is set to `replication`) + +| Name | Description | Value | +| ------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------ | --------------------- | +| `readReplicas.name` | Name of the read replicas database (eg secondary, slave, ...) | `read` | +| `readReplicas.replicaCount` | Number of PostgreSQL read only replicas | `1` | +| `readReplicas.extendedConfiguration` | Extended PostgreSQL read only replicas configuration (appended to main or default configuration) | `""` | +| `readReplicas.extraEnvVars` | Array with extra environment variables to add to PostgreSQL read only nodes | `[]` | +| `readReplicas.extraEnvVarsCM` | Name of existing ConfigMap containing extra env vars for PostgreSQL read only nodes | `""` | +| `readReplicas.extraEnvVarsSecret` | Name of existing Secret containing extra env vars for PostgreSQL read only nodes | `""` | +| `readReplicas.command` | Override default container command (useful when using custom images) | `[]` | +| `readReplicas.args` | Override default container args (useful when using custom images) | `[]` | +| `readReplicas.livenessProbe.enabled` | Enable livenessProbe on PostgreSQL read only containers | `true` | +| `readReplicas.livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `30` | +| `readReplicas.livenessProbe.periodSeconds` | Period seconds for livenessProbe | `10` | +| `readReplicas.livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `5` | +| `readReplicas.livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `6` | +| `readReplicas.livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` | +| `readReplicas.readinessProbe.enabled` | Enable readinessProbe on PostgreSQL read only containers | `true` | +| `readReplicas.readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `5` | +| `readReplicas.readinessProbe.periodSeconds` | Period seconds for readinessProbe | `10` | +| `readReplicas.readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `5` | +| `readReplicas.readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `6` | +| `readReplicas.readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` | +| `readReplicas.startupProbe.enabled` | Enable startupProbe on PostgreSQL read only containers | `false` | +| `readReplicas.startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `30` | +| `readReplicas.startupProbe.periodSeconds` | Period seconds for startupProbe | `10` | +| `readReplicas.startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `1` | +| `readReplicas.startupProbe.failureThreshold` | Failure threshold for startupProbe | `15` | +| 
`readReplicas.startupProbe.successThreshold` | Success threshold for startupProbe | `1` | +| `readReplicas.customLivenessProbe` | Custom livenessProbe that overrides the default one | `{}` | +| `readReplicas.customReadinessProbe` | Custom readinessProbe that overrides the default one | `{}` | +| `readReplicas.customStartupProbe` | Custom startupProbe that overrides the default one | `{}` | +| `readReplicas.lifecycleHooks` | for the PostgreSQL read only container to automate configuration before or after startup | `{}` | +| `readReplicas.resources.limits` | The resources limits for the PostgreSQL read only containers | `{}` | +| `readReplicas.resources.requests.memory` | The requested memory for the PostgreSQL read only containers | `256Mi` | +| `readReplicas.resources.requests.cpu` | The requested cpu for the PostgreSQL read only containers | `250m` | +| `readReplicas.podSecurityContext.enabled` | Enable security context | `true` | +| `readReplicas.podSecurityContext.fsGroup` | Group ID for the pod | `1001` | +| `readReplicas.containerSecurityContext.enabled` | Enable container security context | `true` | +| `readReplicas.containerSecurityContext.runAsUser` | User ID for the container | `1001` | +| `readReplicas.hostAliases` | PostgreSQL read only pods host aliases | `[]` | +| `readReplicas.hostNetwork` | Specify if host network should be enabled for PostgreSQL pod (PostgreSQL read only) | `false` | +| `readReplicas.hostIPC` | Specify if host IPC should be enabled for PostgreSQL pod (postgresql primary) | `false` | +| `readReplicas.labels` | Map of labels to add to the statefulset (PostgreSQL read only) | `{}` | +| `readReplicas.annotations` | Annotations for PostgreSQL read only pods | `{}` | +| `readReplicas.podLabels` | Map of labels to add to the pods (PostgreSQL read only) | `{}` | +| `readReplicas.podAnnotations` | Map of annotations to add to the pods (PostgreSQL read only) | `{}` | +| `readReplicas.podAffinityPreset` | PostgreSQL read only pod affinity preset. Ignored if `primary.affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `readReplicas.podAntiAffinityPreset` | PostgreSQL read only pod anti-affinity preset. Ignored if `primary.affinity` is set. Allowed values: `soft` or `hard` | `soft` | +| `readReplicas.nodeAffinityPreset.type` | PostgreSQL read only node affinity preset type. Ignored if `primary.affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `readReplicas.nodeAffinityPreset.key` | PostgreSQL read only node label key to match Ignored if `primary.affinity` is set. | `""` | +| `readReplicas.nodeAffinityPreset.values` | PostgreSQL read only node label values to match. Ignored if `primary.affinity` is set. | `[]` | +| `readReplicas.affinity` | Affinity for PostgreSQL read only pods assignment | `{}` | +| `readReplicas.nodeSelector` | Node labels for PostgreSQL read only pods assignment | `{}` | +| `readReplicas.tolerations` | Tolerations for PostgreSQL read only pods assignment | `[]` | +| `readReplicas.topologySpreadConstraints` | Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template | `[]` | +| `readReplicas.priorityClassName` | Priority Class to use for each pod (PostgreSQL read only) | `""` | +| `readReplicas.schedulerName` | Use an alternate scheduler, e.g. "stork". 
| `""` | +| `readReplicas.terminationGracePeriodSeconds` | Seconds PostgreSQL read only pod needs to terminate gracefully | `""` | +| `readReplicas.updateStrategy.type` | PostgreSQL read only statefulset strategy type | `RollingUpdate` | +| `readReplicas.updateStrategy.rollingUpdate` | PostgreSQL read only statefulset rolling update configuration parameters | `{}` | +| `readReplicas.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the PostgreSQL read only container(s) | `[]` | +| `readReplicas.extraVolumes` | Optionally specify extra list of additional volumes for the PostgreSQL read only pod(s) | `[]` | +| `readReplicas.sidecars` | Add additional sidecar containers to the PostgreSQL read only pod(s) | `[]` | +| `readReplicas.initContainers` | Add additional init containers to the PostgreSQL read only pod(s) | `[]` | +| `readReplicas.extraPodSpec` | Optionally specify extra PodSpec for the PostgreSQL read only pod(s) | `{}` | +| `readReplicas.service.type` | Kubernetes Service type | `ClusterIP` | +| `readReplicas.service.ports.postgresql` | PostgreSQL service port | `5432` | +| `readReplicas.service.nodePorts.postgresql` | Node port for PostgreSQL | `""` | +| `readReplicas.service.clusterIP` | Static clusterIP or None for headless services | `""` | +| `readReplicas.service.annotations` | Annotations for PostgreSQL read only service | `{}` | +| `readReplicas.service.loadBalancerIP` | Load balancer IP if service type is `LoadBalancer` | `""` | +| `readReplicas.service.externalTrafficPolicy` | Enable client source IP preservation | `Cluster` | +| `readReplicas.service.loadBalancerSourceRanges` | Addresses that are allowed when service is LoadBalancer | `[]` | +| `readReplicas.service.extraPorts` | Extra ports to expose in the PostgreSQL read only service | `[]` | +| `readReplicas.service.sessionAffinity` | Session Affinity for Kubernetes service, can be "None" or "ClientIP" | `None` | +| `readReplicas.service.sessionAffinityConfig` | Additional settings for the sessionAffinity | `{}` | +| `readReplicas.service.headless.annotations` | Additional custom annotations for headless PostgreSQL read only service | `{}` | +| `readReplicas.persistence.enabled` | Enable PostgreSQL read only data persistence using PVC | `true` | +| `readReplicas.persistence.existingClaim` | Name of an existing PVC to use | `""` | +| `readReplicas.persistence.mountPath` | The path the volume will be mounted at | `/bitnami/postgresql` | +| `readReplicas.persistence.subPath` | The subdirectory of the volume to mount to | `""` | +| `readReplicas.persistence.storageClass` | PVC Storage Class for PostgreSQL read only data volume | `""` | +| `readReplicas.persistence.accessModes` | PVC Access Mode for PostgreSQL volume | `["ReadWriteOnce"]` | +| `readReplicas.persistence.size` | PVC Storage Request for PostgreSQL volume | `8Gi` | +| `readReplicas.persistence.annotations` | Annotations for the PVC | `{}` | +| `readReplicas.persistence.labels` | Labels for the PVC | `{}` | +| `readReplicas.persistence.selector` | Selector to match an existing Persistent Volume (this value is evaluated as a template) | `{}` | +| `readReplicas.persistence.dataSource` | Custom PVC data source | `{}` | + + +### NetworkPolicy parameters + +| Name | Description | Value | +| ------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------- | ------- | +| 
`networkPolicy.enabled` | Enable network policies | `false` | +| `networkPolicy.metrics.enabled` | Enable network policies for metrics (prometheus) | `false` | +| `networkPolicy.metrics.namespaceSelector` | Monitoring namespace selector labels. These labels will be used to identify the prometheus' namespace. | `{}` | +| `networkPolicy.metrics.podSelector` | Monitoring pod selector labels. These labels will be used to identify the Prometheus pods. | `{}` | +| `networkPolicy.ingressRules.primaryAccessOnlyFrom.enabled` | Enable ingress rule that makes PostgreSQL primary node only accessible from a particular origin. | `false` | +| `networkPolicy.ingressRules.primaryAccessOnlyFrom.namespaceSelector` | Namespace selector label that is allowed to access the PostgreSQL primary node. This label will be used to identified the allowed namespace(s). | `{}` | +| `networkPolicy.ingressRules.primaryAccessOnlyFrom.podSelector` | Pods selector label that is allowed to access the PostgreSQL primary node. This label will be used to identified the allowed pod(s). | `{}` | +| `networkPolicy.ingressRules.primaryAccessOnlyFrom.customRules` | Custom network policy for the PostgreSQL primary node. | `{}` | +| `networkPolicy.ingressRules.readReplicasAccessOnlyFrom.enabled` | Enable ingress rule that makes PostgreSQL read-only nodes only accessible from a particular origin. | `false` | +| `networkPolicy.ingressRules.readReplicasAccessOnlyFrom.namespaceSelector` | Namespace selector label that is allowed to access the PostgreSQL read-only nodes. This label will be used to identified the allowed namespace(s). | `{}` | +| `networkPolicy.ingressRules.readReplicasAccessOnlyFrom.podSelector` | Pods selector label that is allowed to access the PostgreSQL read-only nodes. This label will be used to identified the allowed pod(s). | `{}` | +| `networkPolicy.ingressRules.readReplicasAccessOnlyFrom.customRules` | Custom network policy for the PostgreSQL read-only nodes. | `{}` | +| `networkPolicy.egressRules.denyConnectionsToExternal` | Enable egress rule that denies outgoing traffic outside the cluster, except for DNS (port 53). | `false` | +| `networkPolicy.egressRules.customRules` | Custom network policy rule | `{}` | + + +### Volume Permissions parameters + +| Name | Description | Value | +| ------------------------------------------------------ | --------------------------------------------------------------------------------------------------------------------------------- | ----------------------- | +| `volumePermissions.enabled` | Enable init container that changes the owner and group of the persistent volume | `false` | +| `volumePermissions.image.registry` | Init container volume-permissions image registry | `docker.io` | +| `volumePermissions.image.repository` | Init container volume-permissions image repository | `bitnami/bitnami-shell` | +| `volumePermissions.image.tag` | Init container volume-permissions image tag (immutable tags are recommended) | `11-debian-11-r50` | +| `volumePermissions.image.digest` | Init container volume-permissions image digest in the way sha256:aa.... 
Please note this parameter, if set, will override the tag | `""` | +| `volumePermissions.image.pullPolicy` | Init container volume-permissions image pull policy | `IfNotPresent` | +| `volumePermissions.image.pullSecrets` | Init container volume-permissions image pull secrets | `[]` | +| `volumePermissions.resources.limits` | Init container volume-permissions resource limits | `{}` | +| `volumePermissions.resources.requests` | Init container volume-permissions resource requests | `{}` | +| `volumePermissions.containerSecurityContext.runAsUser` | User ID for the init container | `0` | + + +### Other Parameters + +| Name | Description | Value | +| --------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------- | ------- | +| `serviceAccount.create` | Enable creation of ServiceAccount for PostgreSQL pod | `false` | +| `serviceAccount.name` | The name of the ServiceAccount to use. | `""` | +| `serviceAccount.automountServiceAccountToken` | Allows auto mount of ServiceAccountToken on the serviceAccount created | `true` | +| `serviceAccount.annotations` | Additional custom annotations for the ServiceAccount | `{}` | +| `rbac.create` | Create Role and RoleBinding (required for PSP to work) | `false` | +| `rbac.rules` | Custom RBAC rules to set | `[]` | +| `psp.create` | Whether to create a PodSecurityPolicy. WARNING: PodSecurityPolicy is deprecated in Kubernetes v1.21 or later, unavailable in v1.25 or later | `false` | + + +### Metrics Parameters + +| Name | Description | Value | +| ----------------------------------------------- | ---------------------------------------------------------------------------------------------------------- | --------------------------- | +| `metrics.enabled` | Start a prometheus exporter | `false` | +| `metrics.image.registry` | PostgreSQL Prometheus Exporter image registry | `docker.io` | +| `metrics.image.repository` | PostgreSQL Prometheus Exporter image repository | `bitnami/postgres-exporter` | +| `metrics.image.tag` | PostgreSQL Prometheus Exporter image tag (immutable tags are recommended) | `0.11.1-debian-11-r27` | +| `metrics.image.digest` | PostgreSQL image digest in the way sha256:aa.... 
Please note this parameter, if set, will override the tag | `""` | +| `metrics.image.pullPolicy` | PostgreSQL Prometheus Exporter image pull policy | `IfNotPresent` | +| `metrics.image.pullSecrets` | Specify image pull secrets | `[]` | +| `metrics.customMetrics` | Define additional custom metrics | `{}` | +| `metrics.extraEnvVars` | Extra environment variables to add to PostgreSQL Prometheus exporter | `[]` | +| `metrics.containerSecurityContext.enabled` | Enable PostgreSQL Prometheus exporter containers' Security Context | `true` | +| `metrics.containerSecurityContext.runAsUser` | Set PostgreSQL Prometheus exporter containers' Security Context runAsUser | `1001` | +| `metrics.containerSecurityContext.runAsNonRoot` | Set PostgreSQL Prometheus exporter containers' Security Context runAsNonRoot | `true` | +| `metrics.livenessProbe.enabled` | Enable livenessProbe on PostgreSQL Prometheus exporter containers | `true` | +| `metrics.livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `5` | +| `metrics.livenessProbe.periodSeconds` | Period seconds for livenessProbe | `10` | +| `metrics.livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `5` | +| `metrics.livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `6` | +| `metrics.livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` | +| `metrics.readinessProbe.enabled` | Enable readinessProbe on PostgreSQL Prometheus exporter containers | `true` | +| `metrics.readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `5` | +| `metrics.readinessProbe.periodSeconds` | Period seconds for readinessProbe | `10` | +| `metrics.readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `5` | +| `metrics.readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `6` | +| `metrics.readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` | +| `metrics.startupProbe.enabled` | Enable startupProbe on PostgreSQL Prometheus exporter containers | `false` | +| `metrics.startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `10` | +| `metrics.startupProbe.periodSeconds` | Period seconds for startupProbe | `10` | +| `metrics.startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `1` | +| `metrics.startupProbe.failureThreshold` | Failure threshold for startupProbe | `15` | +| `metrics.startupProbe.successThreshold` | Success threshold for startupProbe | `1` | +| `metrics.customLivenessProbe` | Custom livenessProbe that overrides the default one | `{}` | +| `metrics.customReadinessProbe` | Custom readinessProbe that overrides the default one | `{}` | +| `metrics.customStartupProbe` | Custom startupProbe that overrides the default one | `{}` | +| `metrics.containerPorts.metrics` | PostgreSQL Prometheus exporter metrics container port | `9187` | +| `metrics.resources.limits` | The resources limits for the PostgreSQL Prometheus exporter container | `{}` | +| `metrics.resources.requests` | The requested resources for the PostgreSQL Prometheus exporter container | `{}` | +| `metrics.service.ports.metrics` | PostgreSQL Prometheus Exporter service port | `9187` | +| `metrics.service.clusterIP` | Static clusterIP or None for headless services | `""` | +| `metrics.service.sessionAffinity` | Control where client requests go, to the same pod or round-robin | `None` | +| `metrics.service.annotations` | Annotations for Prometheus to auto-discover the metrics endpoint | `{}` | +| 
`metrics.serviceMonitor.enabled` | Create ServiceMonitor Resource for scraping metrics using Prometheus Operator | `false` |
+| `metrics.serviceMonitor.namespace` | Namespace for the ServiceMonitor Resource (defaults to the Release Namespace) | `""` |
+| `metrics.serviceMonitor.interval` | Interval at which metrics should be scraped. | `""` |
+| `metrics.serviceMonitor.scrapeTimeout` | Timeout after which the scrape is ended | `""` |
+| `metrics.serviceMonitor.labels` | Additional labels that can be used so ServiceMonitor will be discovered by Prometheus | `{}` |
+| `metrics.serviceMonitor.selector` | Prometheus instance selector labels | `{}` |
+| `metrics.serviceMonitor.relabelings` | RelabelConfigs to apply to samples before scraping | `[]` |
+| `metrics.serviceMonitor.metricRelabelings` | MetricRelabelConfigs to apply to samples before ingestion | `[]` |
+| `metrics.serviceMonitor.honorLabels` | Specify honorLabels parameter to add the scrape endpoint | `false` |
+| `metrics.serviceMonitor.jobLabel` | The name of the label on the target service to use as the job name in Prometheus. | `""` |
+| `metrics.prometheusRule.enabled` | Create a PrometheusRule for Prometheus Operator | `false` |
+| `metrics.prometheusRule.namespace` | Namespace for the PrometheusRule Resource (defaults to the Release Namespace) | `""` |
+| `metrics.prometheusRule.labels` | Additional labels that can be used so PrometheusRule will be discovered by Prometheus | `{}` |
+| `metrics.prometheusRule.rules` | PrometheusRule definitions | `[]` |
+
+
+Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example,
+
+```bash
+helm install my-release \
+  --set auth.postgresPassword=secretpassword \
+  my-repo/postgresql
+```
+
+The above command sets the PostgreSQL `postgres` account password to `secretpassword`.
+
+> NOTE: Once this chart is deployed, it is not possible to change the application's access credentials, such as usernames or passwords, using Helm. To change these application credentials after deployment, delete any persistent volumes (PVs) used by the chart and re-deploy it, or use the application's built-in administrative tools if available.
+
+> **Warning** On a new installation, setting a password will be ignored if a previous PostgreSQL release was deleted through the `helm` command but its persistent volume was kept. In that case, the old PVC still contains the old password, and setting a new one through Helm won't take effect. Deleting the persistent volumes (PVs) solves the issue. Refer to [issue 2061](https://github.com/bitnami/charts/issues/2061) for more details.
+
+Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example,
+
+```bash
+helm install my-release -f values.yaml my-repo/postgresql
+```
+
+> **Tip**: You can use the default [values.yaml](values.yaml)
+
+## Configuration and installation details
+
+### [Rolling VS Immutable tags](https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/)
+
+It is strongly recommended to use immutable tags in a production environment. This ensures your deployment does not change automatically if the same tag is updated with a different image.
+
+Bitnami will release a new chart updating its containers if a new version of the main container is available, or if significant changes or critical vulnerabilities exist.
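+
+As an illustration of pinning the image, a values snippet like the following could be used. The tag shown is simply the chart's current default from the parameters table, and the commented digest is a placeholder, not a real value:
+
+```yaml
+image:
+  registry: docker.io
+  repository: bitnami/postgresql
+  tag: 15.1.0-debian-11-r0        # immutable tag (chart default at the time of writing)
+  # digest: sha256:<digest>       # optionally pin by digest; if set, it overrides the tag
+```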
+
+### Customizing primary and read replica services in a replicated configuration
+
+At the top level, there is a service object which defines the services for both primary and readReplicas. For deeper customization, there are service objects for both the primary and read types individually. These allow you to override the values in the top-level service object so that the primary and read services can be of different service types and have different clusterIPs / nodePorts. Also, if you want the primary and read services to be of type NodePort, you will need to set their nodePorts to different values to prevent a collision. The values that are deeper in the `primary.service` or `readReplicas.service` objects will take precedence over the top-level service object.
+
+### Use a different PostgreSQL version
+
+To modify the application version used in this chart, specify a different version of the image using the `image.tag` parameter and/or a different repository using the `image.repository` parameter. Refer to the [chart documentation for more information on these parameters and how to use them with images from a private registry](https://docs.bitnami.com/kubernetes/infrastructure/postgresql/configuration/change-image-version/).
+
+### postgresql.conf / pg_hba.conf files as configMap
+
+This helm chart also supports customizing the PostgreSQL configuration file. You can add additional PostgreSQL configuration parameters using the `primary.extendedConfiguration`/`readReplicas.extendedConfiguration` parameters as a string. Alternatively, to replace the entire default configuration use `primary.configuration`.
+
+You can also add a custom pg_hba.conf using the `primary.pgHbaConfiguration` parameter.
+
+In addition to these options, you can also set an external ConfigMap with all the configuration files. This is done by setting the `primary.existingConfigmap` parameter. Note that this will override the two previous options.
+
+### Initialize a fresh instance
+
+The [Bitnami PostgreSQL](https://github.com/bitnami/containers/tree/main/bitnami/postgresql) image allows you to use your custom scripts to initialize a fresh instance. In order to execute the scripts, you can specify custom scripts using the `primary.initdb.scripts` parameter as a string.
+
+In addition, you can also set an external ConfigMap with all the initialization scripts. This is done by setting the `primary.initdb.scriptsConfigMap` parameter. Note that this will override the two previous options. If your initialization scripts contain sensitive information such as credentials or passwords, you can use the `primary.initdb.scriptsSecret` parameter instead.
+
+The allowed extensions are `.sh`, `.sql` and `.sql.gz`.
+
+### Securing traffic using TLS
+
+TLS support can be enabled in the chart by specifying the `tls.` parameters while creating a release. The following parameters should be configured to properly enable the TLS support in the chart:
+
+- `tls.enabled`: Enable TLS support. Defaults to `false`
+- `tls.certificatesSecret`: Name of an existing secret that contains the certificates. No defaults.
+- `tls.certFilename`: Certificate filename. No defaults.
+- `tls.certKeyFilename`: Certificate key filename. No defaults.
+ +For example: + +- First, create the secret with the cetificates files: + + ```console + kubectl create secret generic certificates-tls-secret --from-file=./cert.crt --from-file=./cert.key --from-file=./ca.crt + ``` + +- Then, use the following parameters: + + ```console + volumePermissions.enabled=true + tls.enabled=true + tls.certificatesSecret="certificates-tls-secret" + tls.certFilename="cert.crt" + tls.certKeyFilename="cert.key" + ``` + + > Note TLS and VolumePermissions: PostgreSQL requires certain permissions on sensitive files (such as certificate keys) to start up. Due to an on-going [issue](https://github.com/kubernetes/kubernetes/issues/57923) regarding kubernetes permissions and the use of `containerSecurityContext.runAsUser`, you must enable `volumePermissions` to ensure everything works as expected. + +### Sidecars + +If you need additional containers to run within the same pod as PostgreSQL (e.g. an additional metrics or logging exporter), you can do so via the `sidecars` config parameter. Simply define your container according to the Kubernetes container spec. + +```yaml +# For the PostgreSQL primary +primary: + sidecars: + - name: your-image-name + image: your-image + imagePullPolicy: Always + ports: + - name: portname + containerPort: 1234 +# For the PostgreSQL replicas +readReplicas: + sidecars: + - name: your-image-name + image: your-image + imagePullPolicy: Always + ports: + - name: portname + containerPort: 1234 +``` + +### Metrics + +The chart optionally can start a metrics exporter for [prometheus](https://prometheus.io). The metrics endpoint (port 9187) is not exposed and it is expected that the metrics are collected from inside the k8s cluster using something similar as the described in the [example Prometheus scrape configuration](https://github.com/prometheus/prometheus/blob/master/documentation/examples/prometheus-kubernetes.yml). + +The exporter allows to create custom metrics from additional SQL queries. See the Chart's `values.yaml` for an example and consult the [exporters documentation](https://github.com/wrouesnel/postgres_exporter#adding-new-metrics-via-a-config-file) for more details. + +### Use of global variables + +In more complex scenarios, we may have the following tree of dependencies + +``` + +--------------+ + | | + +------------+ Chart 1 +-----------+ + | | | | + | --------+------+ | + | | | + | | | + | | | + | | | + v v v ++-------+------+ +--------+------+ +--------+------+ +| | | | | | +| PostgreSQL | | Sub-chart 1 | | Sub-chart 2 | +| | | | | | ++--------------+ +---------------+ +---------------+ +``` + +The three charts below depend on the parent chart Chart 1. However, subcharts 1 and 2 may need to connect to PostgreSQL as well. In order to do so, subcharts 1 and 2 need to know the PostgreSQL credentials, so one option for deploying could be deploy Chart 1 with the following parameters: + +``` +postgresql.auth.username=testuser +subchart1.postgresql.auth.username=testuser +subchart2.postgresql.auth.username=testuser +postgresql.auth.password=testpass +subchart1.postgresql.auth.password=testpass +subchart2.postgresql.auth.password=testpass +postgresql.auth.database=testdb +subchart1.postgresql.auth.database=testdb +subchart2.postgresql.auth.database=testdb +``` + +If the number of dependent sub-charts increases, installing the chart with parameters can become increasingly difficult. 
An alternative would be to set the credentials using global variables as follows:
+
+```
+global.postgresql.auth.username=testuser
+global.postgresql.auth.password=testpass
+global.postgresql.auth.database=testdb
+```
+
+This way, the credentials will be available in all of the subcharts.
+
+## Persistence
+
+The [Bitnami PostgreSQL](https://github.com/bitnami/containers/tree/main/bitnami/postgresql) image stores the PostgreSQL data and configurations at the `/bitnami/postgresql` path of the container.
+
+Persistent Volume Claims are used to keep the data across deployments. This is known to work in GCE, AWS, and minikube.
+See the [Parameters](#parameters) section to configure the PVC or to disable persistence.
+
+If the volume already contains data, replication to the standby nodes will fail for all commits; see the [code present in the container repository](https://github.com/bitnami/containers/tree/main/bitnami/postgresql) for details. If you need to keep that data, convert it to SQL and import it after `helm install` has finished.
+
+## NetworkPolicy
+
+To enable network policy for PostgreSQL, install [a networking plugin that implements the Kubernetes NetworkPolicy spec](https://kubernetes.io/docs/tasks/administer-cluster/declare-network-policy#before-you-begin), and set `networkPolicy.enabled` to `true`.
+
+For Kubernetes v1.5 & v1.6, you must also turn on NetworkPolicy by setting the DefaultDeny namespace annotation. Note: this will enforce policy for _all_ pods in the namespace:
+
+```bash
+kubectl annotate namespace default "net.beta.kubernetes.io/network-policy={\"ingress\":{\"isolation\":\"DefaultDeny\"}}"
+```
+
+With NetworkPolicy enabled, traffic will be limited to just port 5432.
+
+For a more precise policy, set `networkPolicy.allowExternal=false`. This will only allow pods with the generated client label to connect to PostgreSQL.
+This label will be displayed in the output of a successful install.
+
+## Differences between Bitnami PostgreSQL image and [Docker Official](https://hub.docker.com/_/postgres) image
+
+- The Docker Official PostgreSQL image does not support replication. If you pass any replication environment variable, it will be ignored. The only environment variables supported by the Docker Official image are POSTGRES_USER, POSTGRES_DB, POSTGRES_PASSWORD, POSTGRES_INITDB_ARGS, POSTGRES_INITDB_WALDIR and PGDATA. All the remaining environment variables are specific to the Bitnami PostgreSQL image.
+- The Bitnami PostgreSQL image is non-root by default. This requires that you run the pod with `securityContext` and updates the permissions of the volume with an `initContainer`. A key benefit of this configuration is that the pod follows security best practices and is prepared to run on Kubernetes distributions with hard security constraints like OpenShift.
+- For OpenShift up to 4.10, let OpenShift set the volume permissions, security context, runAsUser and fsGroup automatically, and disable the predefined settings of the helm chart: primary.securityContext.enabled=false,primary.containerSecurityContext.enabled=false,volumePermissions.enabled=false,shmVolume.enabled=false
+- For OpenShift 4.11 and higher, let OpenShift set the runAsUser and fsGroup automatically. Configure the pod and container security context to restrictive defaults and disable the volume permissions setup: primary.
+ podSecurityContext.fsGroup=null,primary.podSecurityContext.seccompProfile.type=RuntimeDefault,primary.containerSecurityContext.runAsUser=null,primary.containerSecurityContext.allowPrivilegeEscalation=false,primary.containerSecurityContext.runAsNonRoot=true,primary.containerSecurityContext.seccompProfile.type=RuntimeDefault,primary.containerSecurityContext.capabilities.drop=['ALL'],volumePermissions.enabled=false,shmVolume.enabled=false + +### Setting Pod's affinity + +This chart allows you to set your custom affinity using the `XXX.affinity` parameter(s). Find more information about Pod's affinity in the [kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity). + +As an alternative, you can use of the preset configurations for pod affinity, pod anti-affinity, and node affinity available at the [bitnami/common](https://github.com/bitnami/charts/tree/main/bitnami/common#affinities) chart. To do so, set the `XXX.podAffinityPreset`, `XXX.podAntiAffinityPreset`, or `XXX.nodeAffinityPreset` parameters. + +## Troubleshooting + +Find more information about how to deal with common errors related to Bitnami's Helm charts in [this troubleshooting guide](https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues). + +## Upgrading + +### To 12.0.0 + +This major version changes the default PostgreSQL image from 14.x to 15.x. Follow the [official instructions](https://www.postgresql.org/docs/15/upgrading.html) to upgrade to 15.x. + +### To any previous version + +Refer to the [chart documentation for more information about how to upgrade from previous releases](https://docs.bitnami.com/kubernetes/infrastructure/postgresql/administration/upgrade/). + +## License + +Copyright © 2022 Bitnami + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. \ No newline at end of file diff --git a/charts/modules/postgresql/charts/common/.helmignore b/charts/modules/postgresql/charts/common/.helmignore new file mode 100644 index 0000000..50af031 --- /dev/null +++ b/charts/modules/postgresql/charts/common/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/charts/modules/postgresql/charts/common/Chart.yaml b/charts/modules/postgresql/charts/common/Chart.yaml new file mode 100644 index 0000000..6f0c3a6 --- /dev/null +++ b/charts/modules/postgresql/charts/common/Chart.yaml @@ -0,0 +1,23 @@ +annotations: + category: Infrastructure +apiVersion: v2 +appVersion: 2.1.2 +description: A Library Helm Chart for grouping common logic between bitnami charts. + This chart is not deployable by itself. 
+home: https://github.com/bitnami/charts/tree/main/bitnami/common +icon: https://bitnami.com/downloads/logos/bitnami-mark.png +keywords: +- common +- helper +- template +- function +- bitnami +maintainers: +- name: Bitnami + url: https://github.com/bitnami/charts +name: common +sources: +- https://github.com/bitnami/charts +- https://www.bitnami.com/ +type: library +version: 2.1.2 diff --git a/charts/modules/postgresql/charts/common/README.md b/charts/modules/postgresql/charts/common/README.md new file mode 100644 index 0000000..a2ecd60 --- /dev/null +++ b/charts/modules/postgresql/charts/common/README.md @@ -0,0 +1,350 @@ +# Bitnami Common Library Chart + +A [Helm Library Chart](https://helm.sh/docs/topics/library_charts/#helm) for grouping common logic between bitnami charts. + +## TL;DR + +```yaml +dependencies: + - name: common + version: 1.x.x + repository: https://charts.bitnami.com/bitnami +``` + +```bash +$ helm dependency update +``` + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "common.names.fullname" . }} +data: + myvalue: "Hello World" +``` + +## Introduction + +This chart provides a common template helpers which can be used to develop new charts using [Helm](https://helm.sh) package manager. + +Bitnami charts can be used with [Kubeapps](https://kubeapps.dev/) for deployment and management of Helm Charts in clusters. + +## Prerequisites + +- Kubernetes 1.19+ +- Helm 3.2.0+ + +## Parameters + +The following table lists the helpers available in the library which are scoped in different sections. + +### Affinities + +| Helper identifier | Description | Expected Input | +|-------------------------------|------------------------------------------------------|------------------------------------------------| +| `common.affinities.nodes.soft` | Return a soft nodeAffinity definition | `dict "key" "FOO" "values" (list "BAR" "BAZ")` | +| `common.affinities.nodes.hard` | Return a hard nodeAffinity definition | `dict "key" "FOO" "values" (list "BAR" "BAZ")` | +| `common.affinities.pods.soft` | Return a soft podAffinity/podAntiAffinity definition | `dict "component" "FOO" "context" $` | +| `common.affinities.pods.hard` | Return a hard podAffinity/podAntiAffinity definition | `dict "component" "FOO" "context" $` | + +### Capabilities + +| Helper identifier | Description | Expected Input | +|------------------------------------------------|------------------------------------------------------------------------------------------------|-------------------| +| `common.capabilities.kubeVersion` | Return the target Kubernetes version (using client default if .Values.kubeVersion is not set). | `.` Chart context | +| `common.capabilities.cronjob.apiVersion` | Return the appropriate apiVersion for cronjob. | `.` Chart context | +| `common.capabilities.deployment.apiVersion` | Return the appropriate apiVersion for deployment. | `.` Chart context | +| `common.capabilities.statefulset.apiVersion` | Return the appropriate apiVersion for statefulset. | `.` Chart context | +| `common.capabilities.ingress.apiVersion` | Return the appropriate apiVersion for ingress. | `.` Chart context | +| `common.capabilities.rbac.apiVersion` | Return the appropriate apiVersion for RBAC resources. | `.` Chart context | +| `common.capabilities.crd.apiVersion` | Return the appropriate apiVersion for CRDs. | `.` Chart context | +| `common.capabilities.policy.apiVersion` | Return the appropriate apiVersion for podsecuritypolicy. 
| `.` Chart context | +| `common.capabilities.networkPolicy.apiVersion` | Return the appropriate apiVersion for networkpolicy. | `.` Chart context | +| `common.capabilities.apiService.apiVersion` | Return the appropriate apiVersion for APIService. | `.` Chart context | +| `common.capabilities.hpa.apiVersion` | Return the appropriate apiVersion for Horizontal Pod Autoscaler | `.` Chart context | +| `common.capabilities.supportsHelmVersion` | Returns true if the used Helm version is 3.3+ | `.` Chart context | + +### Errors + +| Helper identifier | Description | Expected Input | +|-----------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------| +| `common.errors.upgrade.passwords.empty` | It will ensure required passwords are given when we are upgrading a chart. If `validationErrors` is not empty it will throw an error and will stop the upgrade action. | `dict "validationErrors" (list $validationError00 $validationError01) "context" $` | + +### Images + +| Helper identifier | Description | Expected Input | +|-----------------------------|------------------------------------------------------|---------------------------------------------------------------------------------------------------------| +| `common.images.image` | Return the proper and full image name | `dict "imageRoot" .Values.path.to.the.image "global" $`, see [ImageRoot](#imageroot) for the structure. | +| `common.images.pullSecrets` | Return the proper Docker Image Registry Secret Names (deprecated: use common.images.renderPullSecrets instead) | `dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "global" .Values.global` | +| `common.images.renderPullSecrets` | Return the proper Docker Image Registry Secret Names (evaluates values as templates) | `dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "context" $` | + +### Ingress + +| Helper identifier | Description | Expected Input | +|-------------------------------------------|-------------------------------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.ingress.backend` | Generate a proper Ingress backend entry depending on the API version | `dict "serviceName" "foo" "servicePort" "bar"`, see the [Ingress deprecation notice](https://kubernetes.io/blog/2019/07/18/api-deprecations-in-1-16/) for the syntax differences | +| `common.ingress.supportsPathType` | Prints "true" if the pathType field is supported | `.` Chart context | +| `common.ingress.supportsIngressClassname` | Prints "true" if the ingressClassname field is supported | `.` Chart context | +| `common.ingress.certManagerRequest` | Prints "true" if required cert-manager annotations for TLS signed certificates are set in the Ingress annotations | `dict "annotations" .Values.path.to.the.ingress.annotations` | + +### Labels + +| Helper identifier | Description | Expected Input | +|-----------------------------|-----------------------------------------------------------------------------|-------------------| +| `common.labels.standard` | Return Kubernetes standard labels | `.` Chart context | +| `common.labels.matchLabels` 
| Labels to use on `deploy.spec.selector.matchLabels` and `svc.spec.selector` | `.` Chart context | + +### Names + +| Helper identifier | Description | Expected Input | +|-----------------------------------|-----------------------------------------------------------------------|-------------------| +| `common.names.name` | Expand the name of the chart or use `.Values.nameOverride` | `.` Chart context | +| `common.names.fullname` | Create a default fully qualified app name. | `.` Chart context | +| `common.names.namespace` | Allow the release namespace to be overridden | `.` Chart context | +| `common.names.fullname.namespace` | Create a fully qualified app name adding the installation's namespace | `.` Chart context | +| `common.names.chart` | Chart name plus version | `.` Chart context | + +### Secrets + +| Helper identifier | Description | Expected Input | +|---------------------------|--------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.secrets.name` | Generate the name of the secret. | `dict "existingSecret" .Values.path.to.the.existingSecret "defaultNameSuffix" "mySuffix" "context" $` see [ExistingSecret](#existingsecret) for the structure. | +| `common.secrets.key` | Generate secret key. | `dict "existingSecret" .Values.path.to.the.existingSecret "key" "keyName"` see [ExistingSecret](#existingsecret) for the structure. | +| `common.passwords.manage` | Generate secret password or retrieve one if already created. | `dict "secret" "secret-name" "key" "keyName" "providedValues" (list "path.to.password1" "path.to.password2") "length" 10 "strong" false "chartName" "chartName" "context" $`, length, strong and chartNAme fields are optional. | +| `common.secrets.exists` | Returns whether a previous generated secret already exists. | `dict "secret" "secret-name" "context" $` | + +### Storage + +| Helper identifier | Description | Expected Input | +|-------------------------------|---------------------------------------|---------------------------------------------------------------------------------------------------------------------| +| `common.storage.class` | Return the proper Storage Class | `dict "persistence" .Values.path.to.the.persistence "global" $`, see [Persistence](#persistence) for the structure. | + +### TplValues + +| Helper identifier | Description | Expected Input | +|---------------------------|----------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.tplvalues.render` | Renders a value that contains template | `dict "value" .Values.path.to.the.Value "context" $`, value is the value should rendered as template, context frequently is the chart context `$` or `.` | + +### Utils + +| Helper identifier | Description | Expected Input | +|--------------------------------|------------------------------------------------------------------------------------------|------------------------------------------------------------------------| +| `common.utils.fieldToEnvVar` | Build environment variable name given a field. | `dict "field" "my-password"` | +| `common.utils.secret.getvalue` | Print instructions to get a secret value. 
| `dict "secret" "secret-name" "field" "secret-value-field" "context" $` | +| `common.utils.getValueFromKey` | Gets a value from `.Values` object given its key path | `dict "key" "path.to.key" "context" $` | +| `common.utils.getKeyFromList` | Returns first `.Values` key with a defined value or first of the list if all non-defined | `dict "keys" (list "path.to.key1" "path.to.key2") "context" $` | + +### Validations + +| Helper identifier | Description | Expected Input | +|--------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.validations.values.single.empty` | Validate a value must not be empty. | `dict "valueKey" "path.to.value" "secret" "secret.name" "field" "my-password" "subchart" "subchart" "context" $` secret, field and subchart are optional. In case they are given, the helper will generate a how to get instruction. See [ValidateValue](#validatevalue) | +| `common.validations.values.multiple.empty` | Validate a multiple values must not be empty. It returns a shared error for all the values. | `dict "required" (list $validateValueConf00 $validateValueConf01) "context" $`. See [ValidateValue](#validatevalue) | +| `common.validations.values.mariadb.passwords` | This helper will ensure required password for MariaDB are not empty. It returns a shared error for all the values. | `dict "secret" "mariadb-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use mariadb chart and the helper. | +| `common.validations.values.mysql.passwords` | This helper will ensure required password for MySQL are not empty. It returns a shared error for all the values. | `dict "secret" "mysql-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use mysql chart and the helper. | +| `common.validations.values.postgresql.passwords` | This helper will ensure required password for PostgreSQL are not empty. It returns a shared error for all the values. | `dict "secret" "postgresql-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use postgresql chart and the helper. | +| `common.validations.values.redis.passwords` | This helper will ensure required password for Redis® are not empty. It returns a shared error for all the values. | `dict "secret" "redis-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use redis chart and the helper. | +| `common.validations.values.cassandra.passwords` | This helper will ensure required password for Cassandra are not empty. It returns a shared error for all the values. | `dict "secret" "cassandra-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use cassandra chart and the helper. | +| `common.validations.values.mongodb.passwords` | This helper will ensure required password for MongoDB® are not empty. It returns a shared error for all the values. 
| `dict "secret" "mongodb-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use mongodb chart and the helper. | + +### Warnings + +| Helper identifier | Description | Expected Input | +|------------------------------|----------------------------------|------------------------------------------------------------| +| `common.warnings.rollingTag` | Warning about using rolling tag. | `ImageRoot` see [ImageRoot](#imageroot) for the structure. | + +## Special input schemas + +### ImageRoot + +```yaml +registry: + type: string + description: Docker registry where the image is located + example: docker.io + +repository: + type: string + description: Repository and image name + example: bitnami/nginx + +tag: + type: string + description: image tag + example: 1.16.1-debian-10-r63 + +pullPolicy: + type: string + description: Specify an imagePullPolicy. Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + +pullSecrets: + type: array + items: + type: string + description: Optionally specify an array of imagePullSecrets (evaluated as templates). + +debug: + type: boolean + description: Set to true if you would like to see extra information on logs + example: false + +## An instance would be: +# registry: docker.io +# repository: bitnami/nginx +# tag: 1.16.1-debian-10-r63 +# pullPolicy: IfNotPresent +# debug: false +``` + +### Persistence + +```yaml +enabled: + type: boolean + description: Whether to enable persistence. + example: true + +storageClass: + type: string + description: Persistent Volume Storage Class. If set to "-", storageClassName: "" which disables dynamic provisioning. + example: "-" + +accessMode: + type: string + description: Access mode for the Persistent Volume Storage. + example: ReadWriteOnce + +size: + type: string + description: Size of the Persistent Volume Storage. + example: 8Gi + +path: + type: string + description: Path to be persisted. + example: /bitnami + +## An instance would be: +# enabled: true +# storageClass: "-" +# accessMode: ReadWriteOnce +# size: 8Gi +# path: /bitnami +``` + +### ExistingSecret + +```yaml +name: + type: string + description: Name of the existing secret. + example: mySecret +keyMapping: + description: Mapping between the expected key name and the name of the key in the existing secret. + type: object + +## An instance would be: +# name: mySecret +# keyMapping: +# password: myPasswordKey +``` + +#### Example of use + +When we store sensitive data for a deployment in a secret, sometimes we want to give users the possibility of using their own existing secrets. + +```yaml +# templates/secret.yaml +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "common.names.fullname" . }} + labels: + app: {{ include "common.names.fullname" . }} +type: Opaque +data: + password: {{ .Values.password | b64enc | quote }} + +# templates/dpl.yaml +--- +... + env: + - name: PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "common.secrets.name" (dict "existingSecret" .Values.existingSecret "context" $) }} + key: {{ include "common.secrets.key" (dict "existingSecret" .Values.existingSecret "key" "password") }} +...
+ +# values.yaml +--- +name: mySecret +keyMapping: + password: myPasswordKey +``` + +### ValidateValue + +#### NOTES.txt + +```console +{{- $validateValueConf00 := (dict "valueKey" "path.to.value00" "secret" "secretName" "field" "password-00") -}} +{{- $validateValueConf01 := (dict "valueKey" "path.to.value01" "secret" "secretName" "field" "password-01") -}} +
+{{ include "common.validations.values.multiple.empty" (dict "required" (list $validateValueConf00 $validateValueConf01) "context" $) }} +``` + +If we force those values to be empty, we will see some alerts: + +```console +$ helm install test mychart --set path.to.value00="",path.to.value01="" + 'path.to.value00' must not be empty, please add '--set path.to.value00=$PASSWORD_00' to the command. To get the current value: + + export PASSWORD_00=$(kubectl get secret --namespace default secretName -o jsonpath="{.data.password-00}" | base64 -d) + + 'path.to.value01' must not be empty, please add '--set path.to.value01=$PASSWORD_01' to the command. To get the current value: + + export PASSWORD_01=$(kubectl get secret --namespace default secretName -o jsonpath="{.data.password-01}" | base64 -d) +``` + +## Upgrading + +### To 1.0.0 + +[On November 13, 2020, Helm v2 support formally ended](https://github.com/helm/charts#status-of-the-project). This major version is the result of the changes required for the Helm Chart to incorporate the features added in Helm v3 and to be consistent with the Helm project itself regarding the Helm v2 EOL. + +**What changes were introduced in this major version?** + +- Previous versions of this Helm Chart use `apiVersion: v1` (installable by both Helm 2 and 3); this Helm Chart was updated to `apiVersion: v2` (installable by Helm 3 only). [Here](https://helm.sh/docs/topics/charts/#the-apiversion-field) you can find more information about the `apiVersion` field. +- Use `type: library`. [Here](https://v3.helm.sh/docs/faq/#library-chart-support) you can find more information. +- The different fields present in the *Chart.yaml* file have been ordered alphabetically in a homogeneous way for all the Bitnami Helm Charts. + +**Considerations when upgrading to this version** + +- If you want to upgrade to this version from a previous one installed with Helm v3, you shouldn't face any issues. +- If you want to upgrade to this version using Helm v2, this scenario is not supported as this version doesn't support Helm v2 anymore. +- If you installed the previous version with Helm v2 and want to upgrade to this version with Helm v3, please refer to the [official Helm documentation](https://helm.sh/docs/topics/v2_v3_migration/#migration-use-cases) about migrating from Helm v2 to v3. + +**Useful links** + +- https://docs.bitnami.com/tutorials/resolve-helm2-helm3-post-migration-issues/ +- https://helm.sh/docs/topics/v2_v3_migration/ +- https://helm.sh/blog/migrate-from-helm-v2-to-helm-v3/ + +## License + +Copyright © 2022 Bitnami + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License.
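+ +## Example usage + +As a minimal, illustrative sketch (not part of the upstream chart) of how a consuming chart that declares `bitnami/common` as a dependency might combine several of the helpers documented above, the Deployment template below assumes a hypothetical `web` component and `.Values.image` / `.Values.global` blocks in the consuming chart's values. + +```yaml +# templates/deployment.yaml in a consuming chart (illustrative sketch) +apiVersion: {{ include "common.capabilities.deployment.apiVersion" . }} +kind: Deployment +metadata: + name: {{ include "common.names.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} +spec: + selector: + matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }} + template: + metadata: + labels: {{- include "common.labels.standard" . | nindent 8 }} + spec: + affinity: + # Soft anti-affinity: prefer spreading "web" replicas across nodes + podAntiAffinity: {{- include "common.affinities.pods.soft" (dict "component" "web" "context" $) | nindent 10 }} + containers: + - name: web + # Resolves registry/repository:tag (or digest), honouring global.imageRegistry if set + image: {{ include "common.images.image" (dict "imageRoot" .Values.image "global" .Values.global) }} +```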
diff --git a/charts/modules/postgresql/charts/common/templates/_affinities.tpl b/charts/modules/postgresql/charts/common/templates/_affinities.tpl new file mode 100644 index 0000000..497068f --- /dev/null +++ b/charts/modules/postgresql/charts/common/templates/_affinities.tpl @@ -0,0 +1,98 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Return a soft nodeAffinity definition +{{ include "common.affinities.nodes.soft" (dict "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.nodes.soft" -}} +preferredDuringSchedulingIgnoredDuringExecution: + - preference: + matchExpressions: + - key: {{ .key }} + operator: In + values: + {{- range .values }} + - {{ . | quote }} + {{- end }} + weight: 1 +{{- end -}} + +{{/* +Return a hard nodeAffinity definition +{{ include "common.affinities.nodes.hard" (dict "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.nodes.hard" -}} +requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: {{ .key }} + operator: In + values: + {{- range .values }} + - {{ . | quote }} + {{- end }} +{{- end -}} + +{{/* +Return a nodeAffinity definition +{{ include "common.affinities.nodes" (dict "type" "soft" "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.nodes" -}} + {{- if eq .type "soft" }} + {{- include "common.affinities.nodes.soft" . -}} + {{- else if eq .type "hard" }} + {{- include "common.affinities.nodes.hard" . -}} + {{- end -}} +{{- end -}} + +{{/* +Return a soft podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods.soft" (dict "component" "FOO" "extraMatchLabels" .Values.extraMatchLabels "context" $) -}} +*/}} +{{- define "common.affinities.pods.soft" -}} +{{- $component := default "" .component -}} +{{- $extraMatchLabels := default (dict) .extraMatchLabels -}} +preferredDuringSchedulingIgnoredDuringExecution: + - podAffinityTerm: + labelSelector: + matchLabels: {{- (include "common.labels.matchLabels" .context) | nindent 10 }} + {{- if not (empty $component) }} + {{ printf "app.kubernetes.io/component: %s" $component }} + {{- end }} + {{- range $key, $value := $extraMatchLabels }} + {{ $key }}: {{ $value | quote }} + {{- end }} + topologyKey: kubernetes.io/hostname + weight: 1 +{{- end -}} + +{{/* +Return a hard podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods.hard" (dict "component" "FOO" "extraMatchLabels" .Values.extraMatchLabels "context" $) -}} +*/}} +{{- define "common.affinities.pods.hard" -}} +{{- $component := default "" .component -}} +{{- $extraMatchLabels := default (dict) .extraMatchLabels -}} +requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: {{- (include "common.labels.matchLabels" .context) | nindent 8 }} + {{- if not (empty $component) }} + {{ printf "app.kubernetes.io/component: %s" $component }} + {{- end }} + {{- range $key, $value := $extraMatchLabels }} + {{ $key }}: {{ $value | quote }} + {{- end }} + topologyKey: kubernetes.io/hostname +{{- end -}} + +{{/* +Return a podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods" (dict "type" "soft" "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.pods" -}} + {{- if eq .type "soft" }} + {{- include "common.affinities.pods.soft" . -}} + {{- else if eq .type "hard" }} + {{- include "common.affinities.pods.hard" . 
-}} + {{- end -}} +{{- end -}} diff --git a/charts/modules/postgresql/charts/common/templates/_capabilities.tpl b/charts/modules/postgresql/charts/common/templates/_capabilities.tpl new file mode 100644 index 0000000..9d9b760 --- /dev/null +++ b/charts/modules/postgresql/charts/common/templates/_capabilities.tpl @@ -0,0 +1,154 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Return the target Kubernetes version +*/}} +{{- define "common.capabilities.kubeVersion" -}} +{{- if .Values.global }} + {{- if .Values.global.kubeVersion }} + {{- .Values.global.kubeVersion -}} + {{- else }} + {{- default .Capabilities.KubeVersion.Version .Values.kubeVersion -}} + {{- end -}} +{{- else }} +{{- default .Capabilities.KubeVersion.Version .Values.kubeVersion -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for poddisruptionbudget. +*/}} +{{- define "common.capabilities.policy.apiVersion" -}} +{{- if semverCompare "<1.21-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "policy/v1beta1" -}} +{{- else -}} +{{- print "policy/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for networkpolicy. +*/}} +{{- define "common.capabilities.networkPolicy.apiVersion" -}} +{{- if semverCompare "<1.7-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for cronjob. +*/}} +{{- define "common.capabilities.cronjob.apiVersion" -}} +{{- if semverCompare "<1.21-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "batch/v1beta1" -}} +{{- else -}} +{{- print "batch/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for deployment. +*/}} +{{- define "common.capabilities.deployment.apiVersion" -}} +{{- if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for statefulset. +*/}} +{{- define "common.capabilities.statefulset.apiVersion" -}} +{{- if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "apps/v1beta1" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for ingress. +*/}} +{{- define "common.capabilities.ingress.apiVersion" -}} +{{- if .Values.ingress -}} +{{- if .Values.ingress.apiVersion -}} +{{- .Values.ingress.apiVersion -}} +{{- else if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "networking.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end }} +{{- else if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "networking.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for RBAC resources. +*/}} +{{- define "common.capabilities.rbac.apiVersion" -}} +{{- if semverCompare "<1.17-0" (include "common.capabilities.kubeVersion" .) 
-}} +{{- print "rbac.authorization.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "rbac.authorization.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for CRDs. +*/}} +{{- define "common.capabilities.crd.apiVersion" -}} +{{- if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "apiextensions.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "apiextensions.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for APIService. +*/}} +{{- define "common.capabilities.apiService.apiVersion" -}} +{{- if semverCompare "<1.10-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "apiregistration.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "apiregistration.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for Horizontal Pod Autoscaler. +*/}} +{{- define "common.capabilities.hpa.apiVersion" -}} +{{- if semverCompare "<1.23-0" (include "common.capabilities.kubeVersion" .context) -}} +{{- if .beta2 -}} +{{- print "autoscaling/v2beta2" -}} +{{- else -}} +{{- print "autoscaling/v2beta1" -}} +{{- end -}} +{{- else -}} +{{- print "autoscaling/v2" -}} +{{- end -}} +{{- end -}} + +{{/* +Returns true if the used Helm version is 3.3+. +A way to check the used Helm version was not introduced until version 3.3.0 with .Capabilities.HelmVersion, which contains an additional "{}}" structure. +This check is introduced as a regexMatch instead of {{ if .Capabilities.HelmVersion }} because checking for the key HelmVersion in <3.3 results in a "interface not found" error. +**To be removed when the catalog's minimun Helm version is 3.3** +*/}} +{{- define "common.capabilities.supportsHelmVersion" -}} +{{- if regexMatch "{(v[0-9])*[^}]*}}$" (.Capabilities | toString ) }} + {{- true -}} +{{- end -}} +{{- end -}} diff --git a/charts/modules/postgresql/charts/common/templates/_errors.tpl b/charts/modules/postgresql/charts/common/templates/_errors.tpl new file mode 100644 index 0000000..a79cc2e --- /dev/null +++ b/charts/modules/postgresql/charts/common/templates/_errors.tpl @@ -0,0 +1,23 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Through error when upgrading using empty passwords values that must not be empty. + +Usage: +{{- $validationError00 := include "common.validations.values.single.empty" (dict "valueKey" "path.to.password00" "secret" "secretName" "field" "password-00") -}} +{{- $validationError01 := include "common.validations.values.single.empty" (dict "valueKey" "path.to.password01" "secret" "secretName" "field" "password-01") -}} +{{ include "common.errors.upgrade.passwords.empty" (dict "validationErrors" (list $validationError00 $validationError01) "context" $) }} + +Required password params: + - validationErrors - String - Required. List of validation strings to be return, if it is empty it won't throw error. + - context - Context - Required. Parent context. +*/}} +{{- define "common.errors.upgrade.passwords.empty" -}} + {{- $validationErrors := join "" .validationErrors -}} + {{- if and $validationErrors .context.Release.IsUpgrade -}} + {{- $errorString := "\nPASSWORDS ERROR: You must provide your current passwords when upgrading the release." -}} + {{- $errorString = print $errorString "\n Note that even after reinstallation, old credentials may be needed as they may be kept in persistent volume claims." 
-}} + {{- $errorString = print $errorString "\n Further information can be obtained at https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues/#credential-errors-while-upgrading-chart-releases" -}} + {{- $errorString = print $errorString "\n%s" -}} + {{- printf $errorString $validationErrors | fail -}} + {{- end -}} +{{- end -}} diff --git a/charts/modules/postgresql/charts/common/templates/_images.tpl b/charts/modules/postgresql/charts/common/templates/_images.tpl new file mode 100644 index 0000000..46c659e --- /dev/null +++ b/charts/modules/postgresql/charts/common/templates/_images.tpl @@ -0,0 +1,76 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Return the proper image name +{{ include "common.images.image" ( dict "imageRoot" .Values.path.to.the.image "global" $) }} +*/}} +{{- define "common.images.image" -}} +{{- $registryName := .imageRoot.registry -}} +{{- $repositoryName := .imageRoot.repository -}} +{{- $separator := ":" -}} +{{- $termination := .imageRoot.tag | toString -}} +{{- if .global }} + {{- if .global.imageRegistry }} + {{- $registryName = .global.imageRegistry -}} + {{- end -}} +{{- end -}} +{{- if .imageRoot.digest }} + {{- $separator = "@" -}} + {{- $termination = .imageRoot.digest | toString -}} +{{- end -}} +{{- printf "%s/%s%s%s" $registryName $repositoryName $separator $termination -}} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names (deprecated: use common.images.renderPullSecrets instead) +{{ include "common.images.pullSecrets" ( dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "global" .Values.global) }} +*/}} +{{- define "common.images.pullSecrets" -}} + {{- $pullSecrets := list }} + + {{- if .global }} + {{- range .global.imagePullSecrets -}} + {{- $pullSecrets = append $pullSecrets . -}} + {{- end -}} + {{- end -}} + + {{- range .images -}} + {{- range .pullSecrets -}} + {{- $pullSecrets = append $pullSecrets . -}} + {{- end -}} + {{- end -}} + + {{- if (not (empty $pullSecrets)) }} +imagePullSecrets: + {{- range $pullSecrets }} + - name: {{ . }} + {{- end }} + {{- end }} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names evaluating values as templates +{{ include "common.images.renderPullSecrets" ( dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "context" $) }} +*/}} +{{- define "common.images.renderPullSecrets" -}} + {{- $pullSecrets := list }} + {{- $context := .context }} + + {{- if $context.Values.global }} + {{- range $context.Values.global.imagePullSecrets -}} + {{- $pullSecrets = append $pullSecrets (include "common.tplvalues.render" (dict "value" . "context" $context)) -}} + {{- end -}} + {{- end -}} + + {{- range .images -}} + {{- range .pullSecrets -}} + {{- $pullSecrets = append $pullSecrets (include "common.tplvalues.render" (dict "value" . "context" $context)) -}} + {{- end -}} + {{- end -}} + + {{- if (not (empty $pullSecrets)) }} +imagePullSecrets: + {{- range $pullSecrets }} + - name: {{ . }} + {{- end }} + {{- end }} +{{- end -}} diff --git a/charts/modules/postgresql/charts/common/templates/_ingress.tpl b/charts/modules/postgresql/charts/common/templates/_ingress.tpl new file mode 100644 index 0000000..831da9c --- /dev/null +++ b/charts/modules/postgresql/charts/common/templates/_ingress.tpl @@ -0,0 +1,68 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Generate backend entry that is compatible with all Kubernetes API versions. 
+ +Usage: +{{ include "common.ingress.backend" (dict "serviceName" "backendName" "servicePort" "backendPort" "context" $) }} + +Params: + - serviceName - String. Name of an existing service backend + - servicePort - String/Int. Port name (or number) of the service. It will be translated to different yaml depending if it is a string or an integer. + - context - Dict - Required. The context for the template evaluation. +*/}} +{{- define "common.ingress.backend" -}} +{{- $apiVersion := (include "common.capabilities.ingress.apiVersion" .context) -}} +{{- if or (eq $apiVersion "extensions/v1beta1") (eq $apiVersion "networking.k8s.io/v1beta1") -}} +serviceName: {{ .serviceName }} +servicePort: {{ .servicePort }} +{{- else -}} +service: + name: {{ .serviceName }} + port: + {{- if typeIs "string" .servicePort }} + name: {{ .servicePort }} + {{- else if or (typeIs "int" .servicePort) (typeIs "float64" .servicePort) }} + number: {{ .servicePort | int }} + {{- end }} +{{- end -}} +{{- end -}} + +{{/* +Print "true" if the API pathType field is supported +Usage: +{{ include "common.ingress.supportsPathType" . }} +*/}} +{{- define "common.ingress.supportsPathType" -}} +{{- if (semverCompare "<1.18-0" (include "common.capabilities.kubeVersion" .)) -}} +{{- print "false" -}} +{{- else -}} +{{- print "true" -}} +{{- end -}} +{{- end -}} + +{{/* +Returns true if the ingressClassname field is supported +Usage: +{{ include "common.ingress.supportsIngressClassname" . }} +*/}} +{{- define "common.ingress.supportsIngressClassname" -}} +{{- if semverCompare "<1.18-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "false" -}} +{{- else -}} +{{- print "true" -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if cert-manager required annotations for TLS signed +certificates are set in the Ingress annotations +Ref: https://cert-manager.io/docs/usage/ingress/#supported-annotations +Usage: +{{ include "common.ingress.certManagerRequest" ( dict "annotations" .Values.path.to.the.ingress.annotations ) }} +*/}} +{{- define "common.ingress.certManagerRequest" -}} +{{ if or (hasKey .annotations "cert-manager.io/cluster-issuer") (hasKey .annotations "cert-manager.io/issuer") (hasKey .annotations "kubernetes.io/tls-acme") }} + {{- true -}} +{{- end -}} +{{- end -}} diff --git a/charts/modules/postgresql/charts/common/templates/_labels.tpl b/charts/modules/postgresql/charts/common/templates/_labels.tpl new file mode 100644 index 0000000..252066c --- /dev/null +++ b/charts/modules/postgresql/charts/common/templates/_labels.tpl @@ -0,0 +1,18 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Kubernetes standard labels +*/}} +{{- define "common.labels.standard" -}} +app.kubernetes.io/name: {{ include "common.names.name" . }} +helm.sh/chart: {{ include "common.names.chart" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end -}} + +{{/* +Labels to use on deploy.spec.selector.matchLabels and svc.spec.selector +*/}} +{{- define "common.labels.matchLabels" -}} +app.kubernetes.io/name: {{ include "common.names.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end -}} diff --git a/charts/modules/postgresql/charts/common/templates/_names.tpl b/charts/modules/postgresql/charts/common/templates/_names.tpl new file mode 100644 index 0000000..1bdac8b --- /dev/null +++ b/charts/modules/postgresql/charts/common/templates/_names.tpl @@ -0,0 +1,70 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. 
+*/}} +{{- define "common.names.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "common.names.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "common.names.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create a default fully qualified dependency name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +Usage: +{{ include "common.names.dependency.fullname" (dict "chartName" "dependency-chart-name" "chartValues" .Values.dependency-chart "context" $) }} +*/}} +{{- define "common.names.dependency.fullname" -}} +{{- if .chartValues.fullnameOverride -}} +{{- .chartValues.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .chartName .chartValues.nameOverride -}} +{{- if contains $name .context.Release.Name -}} +{{- .context.Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .context.Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Allow the release namespace to be overridden for multi-namespace deployments in combined charts. +*/}} +{{- define "common.names.namespace" -}} +{{- if .Values.namespaceOverride -}} +{{- .Values.namespaceOverride -}} +{{- else -}} +{{- .Release.Namespace -}} +{{- end -}} +{{- end -}} + +{{/* +Create a fully qualified app name adding the installation's namespace. +*/}} +{{- define "common.names.fullname.namespace" -}} +{{- printf "%s-%s" (include "common.names.fullname" .) (include "common.names.namespace" .) | trunc 63 | trimSuffix "-" -}} +{{- end -}} diff --git a/charts/modules/postgresql/charts/common/templates/_secrets.tpl b/charts/modules/postgresql/charts/common/templates/_secrets.tpl new file mode 100644 index 0000000..4267d42 --- /dev/null +++ b/charts/modules/postgresql/charts/common/templates/_secrets.tpl @@ -0,0 +1,165 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Generate secret name. + +Usage: +{{ include "common.secrets.name" (dict "existingSecret" .Values.path.to.the.existingSecret "defaultNameSuffix" "mySuffix" "context" $) }} + +Params: + - existingSecret - ExistingSecret/String - Optional. The path to the existing secrets in the values.yaml given by the user + to be used instead of the default one. Allows for it to be of type String (just the secret name) for backwards compatibility. + +info: https://github.com/bitnami/charts/tree/main/bitnami/common#existingsecret + - defaultNameSuffix - String - Optional. It is used only if we have several secrets in the same deployment. + - context - Dict - Required. The context for the template evaluation. 
+*/}} +{{- define "common.secrets.name" -}} +{{- $name := (include "common.names.fullname" .context) -}} + +{{- if .defaultNameSuffix -}} +{{- $name = printf "%s-%s" $name .defaultNameSuffix | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- with .existingSecret -}} +{{- if not (typeIs "string" .) -}} +{{- with .name -}} +{{- $name = . -}} +{{- end -}} +{{- else -}} +{{- $name = . -}} +{{- end -}} +{{- end -}} + +{{- printf "%s" $name -}} +{{- end -}} + +{{/* +Generate secret key. + +Usage: +{{ include "common.secrets.key" (dict "existingSecret" .Values.path.to.the.existingSecret "key" "keyName") }} + +Params: + - existingSecret - ExistingSecret/String - Optional. The path to the existing secrets in the values.yaml given by the user + to be used instead of the default one. Allows for it to be of type String (just the secret name) for backwards compatibility. + +info: https://github.com/bitnami/charts/tree/main/bitnami/common#existingsecret + - key - String - Required. Name of the key in the secret. +*/}} +{{- define "common.secrets.key" -}} +{{- $key := .key -}} + +{{- if .existingSecret -}} + {{- if not (typeIs "string" .existingSecret) -}} + {{- if .existingSecret.keyMapping -}} + {{- $key = index .existingSecret.keyMapping $.key -}} + {{- end -}} + {{- end }} +{{- end -}} + +{{- printf "%s" $key -}} +{{- end -}} + +{{/* +Generate secret password or retrieve one if already created. + +Usage: +{{ include "common.secrets.passwords.manage" (dict "secret" "secret-name" "key" "keyName" "providedValues" (list "path.to.password1" "path.to.password2") "length" 10 "strong" false "chartName" "chartName" "context" $) }} + +Params: + - secret - String - Required - Name of the 'Secret' resource where the password is stored. + - key - String - Required - Name of the key in the secret. + - providedValues - List - Required - The path to the validating value in the values.yaml, e.g: "mysql.password". Will pick first parameter with a defined value. + - length - int - Optional - Length of the generated random password. + - strong - Boolean - Optional - Whether to add symbols to the generated random password. + - chartName - String - Optional - Name of the chart used when said chart is deployed as a subchart. + - context - Context - Required - Parent context. + +The order in which this function returns a secret password: + 1. Already existing 'Secret' resource + (If a 'Secret' resource is found under the name provided to the 'secret' parameter to this function and that 'Secret' resource contains a key with the name passed as the 'key' parameter to this function then the value of this existing secret password will be returned) + 2. Password provided via the values.yaml + (If one of the keys passed to the 'providedValues' parameter to this function is a valid path to a key in the values.yaml and has a value, the value of the first key with a value will be returned) + 3. 
Randomly generated secret password + (A new random secret password with the length specified in the 'length' parameter will be generated and returned) + +*/}} +{{- define "common.secrets.passwords.manage" -}} + +{{- $password := "" }} +{{- $subchart := "" }} +{{- $chartName := default "" .chartName }} +{{- $passwordLength := default 10 .length }} +{{- $providedPasswordKey := include "common.utils.getKeyFromList" (dict "keys" .providedValues "context" $.context) }} +{{- $providedPasswordValue := include "common.utils.getValueFromKey" (dict "key" $providedPasswordKey "context" $.context) }} +{{- $secretData := (lookup "v1" "Secret" $.context.Release.Namespace .secret).data }} +{{- if $secretData }} + {{- if hasKey $secretData .key }} + {{- $password = index $secretData .key | quote }} + {{- else }} + {{- printf "\nPASSWORDS ERROR: The secret \"%s\" does not contain the key \"%s\"\n" .secret .key | fail -}} + {{- end -}} +{{- else if $providedPasswordValue }} + {{- $password = $providedPasswordValue | toString | b64enc | quote }} +{{- else }} + + {{- if .context.Values.enabled }} + {{- $subchart = $chartName }} + {{- end -}} + + {{- $requiredPassword := dict "valueKey" $providedPasswordKey "secret" .secret "field" .key "subchart" $subchart "context" $.context -}} + {{- $requiredPasswordError := include "common.validations.values.single.empty" $requiredPassword -}} + {{- $passwordValidationErrors := list $requiredPasswordError -}} + {{- include "common.errors.upgrade.passwords.empty" (dict "validationErrors" $passwordValidationErrors "context" $.context) -}} + + {{- if .strong }} + {{- $subStr := list (lower (randAlpha 1)) (randNumeric 1) (upper (randAlpha 1)) | join "_" }} + {{- $password = randAscii $passwordLength }} + {{- $password = regexReplaceAllLiteral "\\W" $password "@" | substr 5 $passwordLength }} + {{- $password = printf "%s%s" $subStr $password | toString | shuffle | b64enc | quote }} + {{- else }} + {{- $password = randAlphaNum $passwordLength | b64enc | quote }} + {{- end }} +{{- end -}} +{{- printf "%s" $password -}} +{{- end -}} + +{{/* +Reuses the value from an existing secret, otherwise sets its value to a default value. + +Usage: +{{ include "common.secrets.lookup" (dict "secret" "secret-name" "key" "keyName" "defaultValue" .Values.myValue "context" $) }} + +Params: + - secret - String - Required - Name of the 'Secret' resource where the password is stored. + - key - String - Required - Name of the key in the secret. + - defaultValue - String - Required - The path to the validating value in the values.yaml, e.g: "mysql.password". Will pick first parameter with a defined value. + - context - Context - Required - Parent context. + +*/}} +{{- define "common.secrets.lookup" -}} +{{- $value := "" -}} +{{- $defaultValue := required "\n'common.secrets.lookup': Argument 'defaultValue' missing or empty" .defaultValue -}} +{{- $secretData := (lookup "v1" "Secret" $.context.Release.Namespace .secret).data -}} +{{- if and $secretData (hasKey $secretData .key) -}} + {{- $value = index $secretData .key -}} +{{- else -}} + {{- $value = $defaultValue | toString | b64enc -}} +{{- end -}} +{{- printf "%s" $value -}} +{{- end -}} + +{{/* +Returns whether a previous generated secret already exists + +Usage: +{{ include "common.secrets.exists" (dict "secret" "secret-name" "context" $) }} + +Params: + - secret - String - Required - Name of the 'Secret' resource where the password is stored. + - context - Context - Required - Parent context. 
+*/}} +{{- define "common.secrets.exists" -}} +{{- $secret := (lookup "v1" "Secret" $.context.Release.Namespace .secret) }} +{{- if $secret }} + {{- true -}} +{{- end -}} +{{- end -}} diff --git a/charts/modules/postgresql/charts/common/templates/_storage.tpl b/charts/modules/postgresql/charts/common/templates/_storage.tpl new file mode 100644 index 0000000..60e2a84 --- /dev/null +++ b/charts/modules/postgresql/charts/common/templates/_storage.tpl @@ -0,0 +1,23 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Return the proper Storage Class +{{ include "common.storage.class" ( dict "persistence" .Values.path.to.the.persistence "global" $) }} +*/}} +{{- define "common.storage.class" -}} + +{{- $storageClass := .persistence.storageClass -}} +{{- if .global -}} + {{- if .global.storageClass -}} + {{- $storageClass = .global.storageClass -}} + {{- end -}} +{{- end -}} + +{{- if $storageClass -}} + {{- if (eq "-" $storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" $storageClass -}} + {{- end -}} +{{- end -}} + +{{- end -}} diff --git a/charts/modules/postgresql/charts/common/templates/_tplvalues.tpl b/charts/modules/postgresql/charts/common/templates/_tplvalues.tpl new file mode 100644 index 0000000..2db1668 --- /dev/null +++ b/charts/modules/postgresql/charts/common/templates/_tplvalues.tpl @@ -0,0 +1,13 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Renders a value that contains template. +Usage: +{{ include "common.tplvalues.render" ( dict "value" .Values.path.to.the.Value "context" $) }} +*/}} +{{- define "common.tplvalues.render" -}} + {{- if typeIs "string" .value }} + {{- tpl .value .context }} + {{- else }} + {{- tpl (.value | toYaml) .context }} + {{- end }} +{{- end -}} diff --git a/charts/modules/postgresql/charts/common/templates/_utils.tpl b/charts/modules/postgresql/charts/common/templates/_utils.tpl new file mode 100644 index 0000000..8c22b2a --- /dev/null +++ b/charts/modules/postgresql/charts/common/templates/_utils.tpl @@ -0,0 +1,62 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Print instructions to get a secret value. +Usage: +{{ include "common.utils.secret.getvalue" (dict "secret" "secret-name" "field" "secret-value-field" "context" $) }} +*/}} +{{- define "common.utils.secret.getvalue" -}} +{{- $varname := include "common.utils.fieldToEnvVar" . -}} +export {{ $varname }}=$(kubectl get secret --namespace {{ .context.Release.Namespace | quote }} {{ .secret }} -o jsonpath="{.data.{{ .field }}}" | base64 -d) +{{- end -}} + +{{/* +Build env var name given a field +Usage: +{{ include "common.utils.fieldToEnvVar" dict "field" "my-password" }} +*/}} +{{- define "common.utils.fieldToEnvVar" -}} + {{- $fieldNameSplit := splitList "-" .field -}} + {{- $upperCaseFieldNameSplit := list -}} + + {{- range $fieldNameSplit -}} + {{- $upperCaseFieldNameSplit = append $upperCaseFieldNameSplit ( upper . ) -}} + {{- end -}} + + {{ join "_" $upperCaseFieldNameSplit }} +{{- end -}} + +{{/* +Gets a value from .Values given +Usage: +{{ include "common.utils.getValueFromKey" (dict "key" "path.to.key" "context" $) }} +*/}} +{{- define "common.utils.getValueFromKey" -}} +{{- $splitKey := splitList "." .key -}} +{{- $value := "" -}} +{{- $latestObj := $.context.Values -}} +{{- range $splitKey -}} + {{- if not $latestObj -}} + {{- printf "please review the entire path of '%s' exists in values" $.key | fail -}} + {{- end -}} + {{- $value = ( index $latestObj . 
) -}} + {{- $latestObj = $value -}} +{{- end -}} +{{- printf "%v" (default "" $value) -}} +{{- end -}} + +{{/* +Returns first .Values key with a defined value or first of the list if all non-defined +Usage: +{{ include "common.utils.getKeyFromList" (dict "keys" (list "path.to.key1" "path.to.key2") "context" $) }} +*/}} +{{- define "common.utils.getKeyFromList" -}} +{{- $key := first .keys -}} +{{- $reverseKeys := reverse .keys }} +{{- range $reverseKeys }} + {{- $value := include "common.utils.getValueFromKey" (dict "key" . "context" $.context ) }} + {{- if $value -}} + {{- $key = . }} + {{- end -}} +{{- end -}} +{{- printf "%s" $key -}} +{{- end -}} diff --git a/charts/modules/postgresql/charts/common/templates/_warnings.tpl b/charts/modules/postgresql/charts/common/templates/_warnings.tpl new file mode 100644 index 0000000..ae10fa4 --- /dev/null +++ b/charts/modules/postgresql/charts/common/templates/_warnings.tpl @@ -0,0 +1,14 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Warning about using rolling tag. +Usage: +{{ include "common.warnings.rollingTag" .Values.path.to.the.imageRoot }} +*/}} +{{- define "common.warnings.rollingTag" -}} + +{{- if and (contains "bitnami/" .repository) (not (.tag | toString | regexFind "-r\\d+$|sha256:")) }} +WARNING: Rolling tag detected ({{ .repository }}:{{ .tag }}), please note that it is strongly recommended to avoid using rolling tags in a production environment. ++info https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/ +{{- end }} + +{{- end -}} diff --git a/charts/modules/postgresql/charts/common/templates/validations/_cassandra.tpl b/charts/modules/postgresql/charts/common/templates/validations/_cassandra.tpl new file mode 100644 index 0000000..ded1ae3 --- /dev/null +++ b/charts/modules/postgresql/charts/common/templates/validations/_cassandra.tpl @@ -0,0 +1,72 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate Cassandra required passwords are not empty. + +Usage: +{{ include "common.validations.values.cassandra.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where Cassandra values are stored, e.g: "cassandra-passwords-secret" + - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.cassandra.passwords" -}} + {{- $existingSecret := include "common.cassandra.values.existingSecret" . -}} + {{- $enabled := include "common.cassandra.values.enabled" . -}} + {{- $dbUserPrefix := include "common.cassandra.values.key.dbUser" . -}} + {{- $valueKeyPassword := printf "%s.password" $dbUserPrefix -}} + + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "cassandra-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.cassandra.values.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. 
Default: false +*/}} +{{- define "common.cassandra.values.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.cassandra.dbUser.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.dbUser.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled cassandra. + +Usage: +{{ include "common.cassandra.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.cassandra.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.cassandra.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key dbUser + +Usage: +{{ include "common.cassandra.values.key.dbUser" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false +*/}} +{{- define "common.cassandra.values.key.dbUser" -}} + {{- if .subchart -}} + cassandra.dbUser + {{- else -}} + dbUser + {{- end -}} +{{- end -}} diff --git a/charts/modules/postgresql/charts/common/templates/validations/_mariadb.tpl b/charts/modules/postgresql/charts/common/templates/validations/_mariadb.tpl new file mode 100644 index 0000000..b6906ff --- /dev/null +++ b/charts/modules/postgresql/charts/common/templates/validations/_mariadb.tpl @@ -0,0 +1,103 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate MariaDB required passwords are not empty. + +Usage: +{{ include "common.validations.values.mariadb.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where MariaDB values are stored, e.g: "mysql-passwords-secret" + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.mariadb.passwords" -}} + {{- $existingSecret := include "common.mariadb.values.auth.existingSecret" . -}} + {{- $enabled := include "common.mariadb.values.enabled" . -}} + {{- $architecture := include "common.mariadb.values.architecture" . -}} + {{- $authPrefix := include "common.mariadb.values.key.auth" . 
-}} + {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}} + {{- $valueKeyUsername := printf "%s.username" $authPrefix -}} + {{- $valueKeyPassword := printf "%s.password" $authPrefix -}} + {{- $valueKeyReplicationPassword := printf "%s.replicationPassword" $authPrefix -}} + + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mariadb-root-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}} + + {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }} + {{- if not (empty $valueUsername) -}} + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mariadb-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + {{- end -}} + + {{- if (eq $architecture "replication") -}} + {{- $requiredReplicationPassword := dict "valueKey" $valueKeyReplicationPassword "secret" .secret "field" "mariadb-replication-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredReplicationPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.mariadb.values.auth.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.mariadb.values.auth.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.mariadb.auth.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.auth.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled mariadb. + +Usage: +{{ include "common.mariadb.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.mariadb.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.mariadb.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for architecture + +Usage: +{{ include "common.mariadb.values.architecture" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.mariadb.values.architecture" -}} + {{- if .subchart -}} + {{- .context.Values.mariadb.architecture -}} + {{- else -}} + {{- .context.Values.architecture -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key auth + +Usage: +{{ include "common.mariadb.values.key.auth" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. 
Default: false +*/}} +{{- define "common.mariadb.values.key.auth" -}} + {{- if .subchart -}} + mariadb.auth + {{- else -}} + auth + {{- end -}} +{{- end -}} diff --git a/charts/modules/postgresql/charts/common/templates/validations/_mongodb.tpl b/charts/modules/postgresql/charts/common/templates/validations/_mongodb.tpl new file mode 100644 index 0000000..f820ec1 --- /dev/null +++ b/charts/modules/postgresql/charts/common/templates/validations/_mongodb.tpl @@ -0,0 +1,108 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate MongoDB® required passwords are not empty. + +Usage: +{{ include "common.validations.values.mongodb.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where MongoDB® values are stored, e.g: "mongodb-passwords-secret" + - subchart - Boolean - Optional. Whether MongoDB® is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.mongodb.passwords" -}} + {{- $existingSecret := include "common.mongodb.values.auth.existingSecret" . -}} + {{- $enabled := include "common.mongodb.values.enabled" . -}} + {{- $authPrefix := include "common.mongodb.values.key.auth" . -}} + {{- $architecture := include "common.mongodb.values.architecture" . -}} + {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}} + {{- $valueKeyUsername := printf "%s.username" $authPrefix -}} + {{- $valueKeyDatabase := printf "%s.database" $authPrefix -}} + {{- $valueKeyPassword := printf "%s.password" $authPrefix -}} + {{- $valueKeyReplicaSetKey := printf "%s.replicaSetKey" $authPrefix -}} + {{- $valueKeyAuthEnabled := printf "%s.enabled" $authPrefix -}} + + {{- $authEnabled := include "common.utils.getValueFromKey" (dict "key" $valueKeyAuthEnabled "context" .context) -}} + + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") (eq $authEnabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mongodb-root-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}} + + {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }} + {{- $valueDatabase := include "common.utils.getValueFromKey" (dict "key" $valueKeyDatabase "context" .context) }} + {{- if and $valueUsername $valueDatabase -}} + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mongodb-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + {{- end -}} + + {{- if (eq $architecture "replicaset") -}} + {{- $requiredReplicaSetKey := dict "valueKey" $valueKeyReplicaSetKey "secret" .secret "field" "mongodb-replica-set-key" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredReplicaSetKey -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.mongodb.values.auth.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MongoDb is used as subchart or not. 
Default: false +*/}} +{{- define "common.mongodb.values.auth.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.mongodb.auth.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.auth.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled mongodb. + +Usage: +{{ include "common.mongodb.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.mongodb.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.mongodb.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key auth + +Usage: +{{ include "common.mongodb.values.key.auth" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MongoDB® is used as subchart or not. Default: false +*/}} +{{- define "common.mongodb.values.key.auth" -}} + {{- if .subchart -}} + mongodb.auth + {{- else -}} + auth + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for architecture + +Usage: +{{ include "common.mongodb.values.architecture" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MongoDB® is used as subchart or not. Default: false +*/}} +{{- define "common.mongodb.values.architecture" -}} + {{- if .subchart -}} + {{- .context.Values.mongodb.architecture -}} + {{- else -}} + {{- .context.Values.architecture -}} + {{- end -}} +{{- end -}} diff --git a/charts/modules/postgresql/charts/common/templates/validations/_mysql.tpl b/charts/modules/postgresql/charts/common/templates/validations/_mysql.tpl new file mode 100644 index 0000000..74472a0 --- /dev/null +++ b/charts/modules/postgresql/charts/common/templates/validations/_mysql.tpl @@ -0,0 +1,103 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate MySQL required passwords are not empty. + +Usage: +{{ include "common.validations.values.mysql.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where MySQL values are stored, e.g: "mysql-passwords-secret" + - subchart - Boolean - Optional. Whether MySQL is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.mysql.passwords" -}} + {{- $existingSecret := include "common.mysql.values.auth.existingSecret" . -}} + {{- $enabled := include "common.mysql.values.enabled" . -}} + {{- $architecture := include "common.mysql.values.architecture" . -}} + {{- $authPrefix := include "common.mysql.values.key.auth" . 
-}} + {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}} + {{- $valueKeyUsername := printf "%s.username" $authPrefix -}} + {{- $valueKeyPassword := printf "%s.password" $authPrefix -}} + {{- $valueKeyReplicationPassword := printf "%s.replicationPassword" $authPrefix -}} + + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mysql-root-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}} + + {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }} + {{- if not (empty $valueUsername) -}} + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mysql-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + {{- end -}} + + {{- if (eq $architecture "replication") -}} + {{- $requiredReplicationPassword := dict "valueKey" $valueKeyReplicationPassword "secret" .secret "field" "mysql-replication-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredReplicationPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.mysql.values.auth.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MySQL is used as subchart or not. Default: false +*/}} +{{- define "common.mysql.values.auth.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.mysql.auth.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.auth.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled mysql. + +Usage: +{{ include "common.mysql.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.mysql.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.mysql.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for architecture + +Usage: +{{ include "common.mysql.values.architecture" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MySQL is used as subchart or not. Default: false +*/}} +{{- define "common.mysql.values.architecture" -}} + {{- if .subchart -}} + {{- .context.Values.mysql.architecture -}} + {{- else -}} + {{- .context.Values.architecture -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key auth + +Usage: +{{ include "common.mysql.values.key.auth" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MySQL is used as subchart or not. 
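(Illustrative) In the subchart layout the password validation above expects, when no existing secret is provided, values roughly shaped like the sketch below; the literal credentials are placeholders only:

  mysql:
    enabled: true
    auth:
      rootPassword: "example-root-password"
      username: "app"
      password: "example-app-password"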
Default: false +*/}} +{{- define "common.mysql.values.key.auth" -}} + {{- if .subchart -}} + mysql.auth + {{- else -}} + auth + {{- end -}} +{{- end -}} diff --git a/charts/modules/postgresql/charts/common/templates/validations/_postgresql.tpl b/charts/modules/postgresql/charts/common/templates/validations/_postgresql.tpl new file mode 100644 index 0000000..164ec0d --- /dev/null +++ b/charts/modules/postgresql/charts/common/templates/validations/_postgresql.tpl @@ -0,0 +1,129 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate PostgreSQL required passwords are not empty. + +Usage: +{{ include "common.validations.values.postgresql.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where postgresql values are stored, e.g: "postgresql-passwords-secret" + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.postgresql.passwords" -}} + {{- $existingSecret := include "common.postgresql.values.existingSecret" . -}} + {{- $enabled := include "common.postgresql.values.enabled" . -}} + {{- $valueKeyPostgresqlPassword := include "common.postgresql.values.key.postgressPassword" . -}} + {{- $valueKeyPostgresqlReplicationEnabled := include "common.postgresql.values.key.replicationPassword" . -}} + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + {{- $requiredPostgresqlPassword := dict "valueKey" $valueKeyPostgresqlPassword "secret" .secret "field" "postgresql-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPostgresqlPassword -}} + + {{- $enabledReplication := include "common.postgresql.values.enabled.replication" . -}} + {{- if (eq $enabledReplication "true") -}} + {{- $requiredPostgresqlReplicationPassword := dict "valueKey" $valueKeyPostgresqlReplicationEnabled "secret" .secret "field" "postgresql-replication-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPostgresqlReplicationPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to decide whether evaluate global values. + +Usage: +{{ include "common.postgresql.values.use.global" (dict "key" "key-of-global" "context" $) }} +Params: + - key - String - Required. Field to be evaluated within global, e.g: "existingSecret" +*/}} +{{- define "common.postgresql.values.use.global" -}} + {{- if .context.Values.global -}} + {{- if .context.Values.global.postgresql -}} + {{- index .context.Values.global.postgresql .key | quote -}} + {{- end -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.postgresql.values.existingSecret" (dict "context" $) }} +*/}} +{{- define "common.postgresql.values.existingSecret" -}} + {{- $globalValue := include "common.postgresql.values.use.global" (dict "key" "existingSecret" "context" .context) -}} + + {{- if .subchart -}} + {{- default (.context.Values.postgresql.existingSecret | quote) $globalValue -}} + {{- else -}} + {{- default (.context.Values.existingSecret | quote) $globalValue -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled postgresql. 
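(Illustrative) As a subchart this resolves to .Values.postgresql.enabled, while standalone it falls back to the negation of .Values.enabled; a parent chart would therefore typically call it as:

  {{ include "common.postgresql.values.enabled" (dict "subchart" true "context" $) }}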
+ +Usage: +{{ include "common.postgresql.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.postgresql.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.postgresql.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key postgressPassword. + +Usage: +{{ include "common.postgresql.values.key.postgressPassword" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.postgresql.values.key.postgressPassword" -}} + {{- $globalValue := include "common.postgresql.values.use.global" (dict "key" "postgresqlUsername" "context" .context) -}} + + {{- if not $globalValue -}} + {{- if .subchart -}} + postgresql.postgresqlPassword + {{- else -}} + postgresqlPassword + {{- end -}} + {{- else -}} + global.postgresql.postgresqlPassword + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled.replication. + +Usage: +{{ include "common.postgresql.values.enabled.replication" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.postgresql.values.enabled.replication" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.postgresql.replication.enabled -}} + {{- else -}} + {{- printf "%v" .context.Values.replication.enabled -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key replication.password. + +Usage: +{{ include "common.postgresql.values.key.replicationPassword" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.postgresql.values.key.replicationPassword" -}} + {{- if .subchart -}} + postgresql.replication.password + {{- else -}} + replication.password + {{- end -}} +{{- end -}} diff --git a/charts/modules/postgresql/charts/common/templates/validations/_redis.tpl b/charts/modules/postgresql/charts/common/templates/validations/_redis.tpl new file mode 100644 index 0000000..dcccfc1 --- /dev/null +++ b/charts/modules/postgresql/charts/common/templates/validations/_redis.tpl @@ -0,0 +1,76 @@ + +{{/* vim: set filetype=mustache: */}} +{{/* +Validate Redis® required passwords are not empty. + +Usage: +{{ include "common.validations.values.redis.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where redis values are stored, e.g: "redis-passwords-secret" + - subchart - Boolean - Optional. Whether redis is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.redis.passwords" -}} + {{- $enabled := include "common.redis.values.enabled" . -}} + {{- $valueKeyPrefix := include "common.redis.values.keys.prefix" . -}} + {{- $standarizedVersion := include "common.redis.values.standarized.version" . 
}} + + {{- $existingSecret := ternary (printf "%s%s" $valueKeyPrefix "auth.existingSecret") (printf "%s%s" $valueKeyPrefix "existingSecret") (eq $standarizedVersion "true") }} + {{- $existingSecretValue := include "common.utils.getValueFromKey" (dict "key" $existingSecret "context" .context) }} + + {{- $valueKeyRedisPassword := ternary (printf "%s%s" $valueKeyPrefix "auth.password") (printf "%s%s" $valueKeyPrefix "password") (eq $standarizedVersion "true") }} + {{- $valueKeyRedisUseAuth := ternary (printf "%s%s" $valueKeyPrefix "auth.enabled") (printf "%s%s" $valueKeyPrefix "usePassword") (eq $standarizedVersion "true") }} + + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $useAuth := include "common.utils.getValueFromKey" (dict "key" $valueKeyRedisUseAuth "context" .context) -}} + {{- if eq $useAuth "true" -}} + {{- $requiredRedisPassword := dict "valueKey" $valueKeyRedisPassword "secret" .secret "field" "redis-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRedisPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled redis. + +Usage: +{{ include "common.redis.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.redis.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.redis.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right prefix path for the values + +Usage: +{{ include "common.redis.values.keys.prefix" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether redis is used as subchart or not. Default: false +*/}} +{{- define "common.redis.values.keys.prefix" -}} + {{- if .subchart -}}redis.{{- else -}}{{- end -}} +{{- end -}} + +{{/* +Checks whether the redis chart includes the standardizations (version >= 14) + +Usage: +{{ include "common.redis.values.standarized.version" (dict "context" $) }} +*/}} +{{- define "common.redis.values.standarized.version" -}} + + {{- $standarizedAuth := printf "%s%s" (include "common.redis.values.keys.prefix" .) "auth" -}} + {{- $standarizedAuthValues := include "common.utils.getValueFromKey" (dict "key" $standarizedAuth "context" .context) }} + + {{- if $standarizedAuthValues -}} + {{- true -}} + {{- end -}} +{{- end -}} diff --git a/charts/modules/postgresql/charts/common/templates/validations/_validations.tpl b/charts/modules/postgresql/charts/common/templates/validations/_validations.tpl new file mode 100644 index 0000000..9a814cf --- /dev/null +++ b/charts/modules/postgresql/charts/common/templates/validations/_validations.tpl @@ -0,0 +1,46 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate values must not be empty. + +Usage: +{{- $validateValueConf00 := (dict "valueKey" "path.to.value" "secret" "secretName" "field" "password-00") -}} +{{- $validateValueConf01 := (dict "valueKey" "path.to.value" "secret" "secretName" "field" "password-01") -}} +{{ include "common.validations.values.multiple.empty" (dict "required" (list $validateValueConf00 $validateValueConf01) "context" $) }} + +Validate value params: + - valueKey - String - Required. The path to the validating value in the values.yaml, e.g: "mysql.password" + - secret - String - Optional. 
Name of the secret where the validating value is generated/stored, e.g: "mysql-passwords-secret" + - field - String - Optional. Name of the field in the secret data, e.g: "mysql-password" +*/}} +{{- define "common.validations.values.multiple.empty" -}} + {{- range .required -}} + {{- include "common.validations.values.single.empty" (dict "valueKey" .valueKey "secret" .secret "field" .field "context" $.context) -}} + {{- end -}} +{{- end -}} + +{{/* +Validate a value must not be empty. + +Usage: +{{ include "common.validations.values.single.empty" (dict "valueKey" "mariadb.password" "secret" "secretName" "field" "my-password" "subchart" "subchart" "context" $) }} + +Validate value params: + - valueKey - String - Required. The path to the validating value in the values.yaml, e.g: "mysql.password" + - secret - String - Optional. Name of the secret where the validating value is generated/stored, e.g: "mysql-passwords-secret" + - field - String - Optional. Name of the field in the secret data, e.g: "mysql-password" + - subchart - String - Optional. Name of the subchart that the validated password is part of. +*/}} +{{- define "common.validations.values.single.empty" -}} + {{- $value := include "common.utils.getValueFromKey" (dict "key" .valueKey "context" .context) }} + {{- $subchart := ternary "" (printf "%s." .subchart) (empty .subchart) }} + + {{- if not $value -}} + {{- $varname := "my-value" -}} + {{- $getCurrentValue := "" -}} + {{- if and .secret .field -}} + {{- $varname = include "common.utils.fieldToEnvVar" . -}} + {{- $getCurrentValue = printf " To get the current value:\n\n %s\n" (include "common.utils.secret.getvalue" .) -}} + {{- end -}} + {{- printf "\n '%s' must not be empty, please add '--set %s%s=$%s' to the command.%s" .valueKey $subchart .valueKey $varname $getCurrentValue -}} + {{- end -}} +{{- end -}} diff --git a/charts/modules/postgresql/charts/common/values.yaml b/charts/modules/postgresql/charts/common/values.yaml new file mode 100644 index 0000000..f2df68e --- /dev/null +++ b/charts/modules/postgresql/charts/common/values.yaml @@ -0,0 +1,5 @@ +## bitnami/common +## It is required by CI/CD tools and processes. +## @skip exampleValue +## +exampleValue: common-chart diff --git a/charts/modules/postgresql/templates/NOTES.txt b/charts/modules/postgresql/templates/NOTES.txt new file mode 100644 index 0000000..e0474d4 --- /dev/null +++ b/charts/modules/postgresql/templates/NOTES.txt @@ -0,0 +1,89 @@ +CHART NAME: {{ .Chart.Name }} +CHART VERSION: {{ .Chart.Version }} +APP VERSION: {{ .Chart.AppVersion }} + +** Please be patient while the chart is being deployed ** + +{{- if .Values.diagnosticMode.enabled }} +The chart has been deployed in diagnostic mode. 
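{{/*
(Illustrative) Diagnostic mode is driven by values of the following shape; the exact defaults live in this chart's values.yaml:

  diagnosticMode:
    enabled: true
    command: ["sleep"]
    args: ["infinity"]
*/}}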
All probes have been disabled and the command has been overwritten with: + + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 4 }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 4 }} + +Get the list of pods by executing: + + kubectl get pods --namespace {{ .Release.Namespace }} -l app.kubernetes.io/instance={{ .Release.Name }} + +Access the pod you want to debug by executing + + kubectl exec --namespace {{ .Release.Namespace }} -ti <NAME OF THE POD> -- /opt/bitnami/scripts/postgresql/entrypoint.sh /bin/bash + +In order to replicate the container startup scripts execute this command: + + /opt/bitnami/scripts/postgresql/entrypoint.sh /opt/bitnami/scripts/postgresql/run.sh + +{{- else }} + +PostgreSQL can be accessed via port {{ include "postgresql.service.port" . }} on the following DNS names from within your cluster: + + {{ include "postgresql.primary.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local - Read/Write connection + +{{- if eq .Values.architecture "replication" }} + + {{ include "postgresql.readReplica.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local - Read only connection + +{{- end }} + +{{- $customUser := include "postgresql.username" . }} +{{- if and (not (empty $customUser)) (ne $customUser "postgres") .Values.auth.enablePostgresUser }} + +To get the password for "postgres" run: + + export POSTGRES_ADMIN_PASSWORD=$(kubectl get secret --namespace {{ .Release.Namespace }} {{ include "postgresql.secretName" . }} -o jsonpath="{.data.postgres-password}" | base64 -d) + +To get the password for "{{ $customUser }}" run: + + export POSTGRES_PASSWORD=$(kubectl get secret --namespace {{ .Release.Namespace }} {{ include "postgresql.secretName" . }} -o jsonpath="{.data.password}" | base64 -d) + +{{- else }} + +To get the password for "{{ default "postgres" $customUser }}" run: + + export POSTGRES_PASSWORD=$(kubectl get secret --namespace {{ .Release.Namespace }} {{ include "postgresql.secretName" . }} -o jsonpath="{.data.{{ ternary "password" "postgres-password" (and (not (empty $customUser)) (ne $customUser "postgres")) }}}" | base64 -d) + +{{- end }} + +To connect to your database run the following command: + + kubectl run {{ include "common.names.fullname" . }}-client --rm --tty -i --restart='Never' --namespace {{ .Release.Namespace }} --image {{ include "postgresql.image" . }} --env="PGPASSWORD=$POSTGRES_PASSWORD" \ + --command -- psql --host {{ include "postgresql.primary.fullname" . }} -U {{ default "postgres" $customUser }} -d {{- if include "postgresql.database" . }} {{ include "postgresql.database" . }}{{- else }} postgres{{- end }} -p {{ include "postgresql.service.port" . }} + + > NOTE: If you access the container using bash, make sure that you execute "/opt/bitnami/scripts/postgresql/entrypoint.sh /bin/bash" in order to avoid the error "psql: local user with ID {{ .Values.primary.containerSecurityContext.runAsUser }} does not exist" + +To connect to your database from outside the cluster execute the following commands: + +{{- if contains "NodePort" .Values.primary.service.type }} + + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "postgresql.primary.fullname" . 
}}) + PGPASSWORD="$POSTGRES_PASSWORD" psql --host $NODE_IP --port $NODE_PORT -U {{ default "postgres" $customUser }} -d {{- if include "postgresql.database" . }} {{ include "postgresql.database" . }}{{- else }} postgres{{- end }} + +{{- else if contains "LoadBalancer" .Values.primary.service.type }} + + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + Watch the status with: 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ include "postgresql.primary.fullname" . }}' + + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "postgresql.primary.fullname" . }} --template "{{ "{{ range (index .status.loadBalancer.ingress 0) }}{{ . }}{{ end }}" }}") + PGPASSWORD="$POSTGRES_PASSWORD" psql --host $SERVICE_IP --port {{ include "postgresql.service.port" . }} -U {{ default "postgres" $customUser }} -d {{- if include "postgresql.database" . }} {{ include "postgresql.database" . }}{{- else }} postgres{{- end }} + +{{- else if contains "ClusterIP" .Values.primary.service.type }} + + kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ include "postgresql.primary.fullname" . }} {{ include "postgresql.service.port" . }}:{{ include "postgresql.service.port" . }} & + PGPASSWORD="$POSTGRES_PASSWORD" psql --host 127.0.0.1 -U {{ default "postgres" $customUser }} -d {{- if include "postgresql.database" . }} {{ include "postgresql.database" . }}{{- else }} postgres{{- end }} -p {{ include "postgresql.service.port" . }} + +{{- end }} +{{- end }} + +{{- include "postgresql.validateValues" . -}} +{{- include "common.warnings.rollingTag" .Values.image -}} +{{- include "common.warnings.rollingTag" .Values.volumePermissions.image }} diff --git a/charts/modules/postgresql/templates/_helpers.tpl b/charts/modules/postgresql/templates/_helpers.tpl new file mode 100644 index 0000000..fe123f5 --- /dev/null +++ b/charts/modules/postgresql/templates/_helpers.tpl @@ -0,0 +1,399 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Create a default fully qualified app name for PostgreSQL Primary objects +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "postgresql.primary.fullname" -}} +{{- if eq .Values.architecture "replication" }} + {{- printf "%s-%s" (include "common.names.fullname" .) .Values.primary.name | trunc 63 | trimSuffix "-" -}} +{{- else -}} + {{- include "common.names.fullname" . -}} +{{- end -}} +{{- end -}} + +{{/* +Create a default fully qualified app name for PostgreSQL read-only replicas objects +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "postgresql.readReplica.fullname" -}} +{{- printf "%s-%s" (include "common.names.fullname" .) .Values.readReplicas.name | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create the default FQDN for PostgreSQL primary headless service +We truncate at 63 chars because of the DNS naming spec. +*/}} +{{- define "postgresql.primary.svc.headless" -}} +{{- printf "%s-hl" (include "postgresql.primary.fullname" .) | trunc 63 | trimSuffix "-" }} +{{- end -}} + +{{/* +Create the default FQDN for PostgreSQL read-only replicas headless service +We truncate at 63 chars because of the DNS naming spec. +*/}} +{{- define "postgresql.readReplica.svc.headless" -}} +{{- printf "%s-hl" (include "postgresql.readReplica.fullname" .) 
| trunc 63 | trimSuffix "-" }} +{{- end -}} + +{{/* +Return the proper PostgreSQL image name +*/}} +{{- define "postgresql.image" -}} +{{ include "common.images.image" (dict "imageRoot" .Values.image "global" .Values.global) }} +{{- end -}} + +{{/* +Return the proper PostgreSQL metrics image name +*/}} +{{- define "postgresql.metrics.image" -}} +{{ include "common.images.image" (dict "imageRoot" .Values.metrics.image "global" .Values.global) }} +{{- end -}} + +{{/* +Return the proper image name (for the init container volume-permissions image) +*/}} +{{- define "postgresql.volumePermissions.image" -}} +{{ include "common.images.image" (dict "imageRoot" .Values.volumePermissions.image "global" .Values.global) }} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names +*/}} +{{- define "postgresql.imagePullSecrets" -}} +{{ include "common.images.pullSecrets" (dict "images" (list .Values.image .Values.metrics.image .Values.volumePermissions.image) "global" .Values.global) }} +{{- end -}} + +{{/* +Return the name for a custom user to create +*/}} +{{- define "postgresql.username" -}} +{{- if .Values.global.postgresql.auth.username }} + {{- .Values.global.postgresql.auth.username -}} +{{- else -}} + {{- .Values.auth.username -}} +{{- end -}} +{{- end -}} + +{{/* +Return the name for a custom database to create +*/}} +{{- define "postgresql.database" -}} +{{- if .Values.global.postgresql.auth.database }} + {{- .Values.global.postgresql.auth.database -}} +{{- else if .Values.auth.database -}} + {{- .Values.auth.database -}} +{{- end -}} +{{- end -}} + +{{/* +Get the password secret. +*/}} +{{- define "postgresql.secretName" -}} +{{- if .Values.global.postgresql.auth.existingSecret }} + {{- printf "%s" (tpl .Values.global.postgresql.auth.existingSecret $) -}} +{{- else if .Values.auth.existingSecret -}} + {{- printf "%s" (tpl .Values.auth.existingSecret $) -}} +{{- else -}} + {{- printf "%s" (include "common.names.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Get the replication-password key. +*/}} +{{- define "postgresql.replicationPasswordKey" -}} +{{- if or .Values.global.postgresql.auth.existingSecret .Values.auth.existingSecret }} + {{- if .Values.global.postgresql.auth.secretKeys.replicationPasswordKey }} + {{- printf "%s" (tpl .Values.global.postgresql.auth.secretKeys.replicationPasswordKey $) -}} + {{- else if .Values.auth.secretKeys.replicationPasswordKey -}} + {{- printf "%s" (tpl .Values.auth.secretKeys.replicationPasswordKey $) -}} + {{- else -}} + {{- "replication-password" -}} + {{- end -}} +{{- else -}} + {{- "replication-password" -}} +{{- end -}} +{{- end -}} + +{{/* +Get the admin-password key. +*/}} +{{- define "postgresql.adminPasswordKey" -}} +{{- if or .Values.global.postgresql.auth.existingSecret .Values.auth.existingSecret }} + {{- if .Values.global.postgresql.auth.secretKeys.adminPasswordKey }} + {{- printf "%s" (tpl .Values.global.postgresql.auth.secretKeys.adminPasswordKey $) -}} + {{- else if .Values.auth.secretKeys.adminPasswordKey -}} + {{- printf "%s" (tpl .Values.auth.secretKeys.adminPasswordKey $) -}} + {{- end -}} +{{- else -}} + {{- "postgres-password" -}} +{{- end -}} +{{- end -}} + +{{/* +Get the user-password key. +*/}} +{{- define "postgresql.userPasswordKey" -}} +{{- if or .Values.global.postgresql.auth.existingSecret .Values.auth.existingSecret }} + {{- if or (empty (include "postgresql.username" .)) (eq (include "postgresql.username" .) "postgres") }} + {{- printf "%s" (include "postgresql.adminPasswordKey" .) 
-}} + {{- else -}} + {{- if .Values.global.postgresql.auth.secretKeys.userPasswordKey }} + {{- printf "%s" (tpl .Values.global.postgresql.auth.secretKeys.userPasswordKey $) -}} + {{- else if .Values.auth.secretKeys.userPasswordKey -}} + {{- printf "%s" (tpl .Values.auth.secretKeys.userPasswordKey $) -}} + {{- end -}} + {{- end -}} +{{- else -}} + {{- ternary "password" "postgres-password" (and (not (empty (include "postgresql.username" .))) (ne (include "postgresql.username" .) "postgres")) -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if a secret object should be created +*/}} +{{- define "postgresql.createSecret" -}} +{{- if not (or .Values.global.postgresql.auth.existingSecret .Values.auth.existingSecret) -}} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Return PostgreSQL service port +*/}} +{{- define "postgresql.service.port" -}} +{{- if .Values.global.postgresql.service.ports.postgresql }} + {{- .Values.global.postgresql.service.ports.postgresql -}} +{{- else -}} + {{- .Values.primary.service.ports.postgresql -}} +{{- end -}} +{{- end -}} + +{{/* +Return PostgreSQL service port +*/}} +{{- define "postgresql.readReplica.service.port" -}} +{{- if .Values.global.postgresql.service.ports.postgresql }} + {{- .Values.global.postgresql.service.ports.postgresql -}} +{{- else -}} + {{- .Values.readReplicas.service.ports.postgresql -}} +{{- end -}} +{{- end -}} + +{{/* +Get the PostgreSQL primary configuration ConfigMap name. +*/}} +{{- define "postgresql.primary.configmapName" -}} +{{- if .Values.primary.existingConfigmap -}} + {{- printf "%s" (tpl .Values.primary.existingConfigmap $) -}} +{{- else -}} + {{- printf "%s-configuration" (include "postgresql.primary.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if a configmap object should be created for PostgreSQL primary with the configuration +*/}} +{{- define "postgresql.primary.createConfigmap" -}} +{{- if and (or .Values.primary.configuration .Values.primary.pgHbaConfiguration) (not .Values.primary.existingConfigmap) }} + {{- true -}} +{{- else -}} +{{- end -}} +{{- end -}} + +{{/* +Get the PostgreSQL primary extended configuration ConfigMap name. +*/}} +{{- define "postgresql.primary.extendedConfigmapName" -}} +{{- if .Values.primary.existingExtendedConfigmap -}} + {{- printf "%s" (tpl .Values.primary.existingExtendedConfigmap $) -}} +{{- else -}} + {{- printf "%s-extended-configuration" (include "postgresql.primary.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Get the PostgreSQL read replica extended configuration ConfigMap name. +*/}} +{{- define "postgresql.readReplicas.extendedConfigmapName" -}} + {{- printf "%s-extended-configuration" (include "postgresql.readReplica.fullname" .) 
-}} +{{- end -}} + +{{/* +Return true if a configmap object should be created for PostgreSQL primary with the extended configuration +*/}} +{{- define "postgresql.primary.createExtendedConfigmap" -}} +{{- if and .Values.primary.extendedConfiguration (not .Values.primary.existingExtendedConfigmap) }} + {{- true -}} +{{- else -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if a configmap object should be created for PostgreSQL read replica with the extended configuration +*/}} +{{- define "postgresql.readReplicas.createExtendedConfigmap" -}} +{{- if .Values.readReplicas.extendedConfiguration }} + {{- true -}} +{{- else -}} +{{- end -}} +{{- end -}} + +{{/* + Create the name of the service account to use + */}} +{{- define "postgresql.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (include "common.names.fullname" .) .Values.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Return true if a configmap should be mounted with PostgreSQL configuration +*/}} +{{- define "postgresql.mountConfigurationCM" -}} +{{- if or .Values.primary.configuration .Values.primary.pgHbaConfiguration .Values.primary.existingConfigmap }} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Get the initialization scripts ConfigMap name. +*/}} +{{- define "postgresql.initdb.scriptsCM" -}} +{{- if .Values.primary.initdb.scriptsConfigMap -}} + {{- printf "%s" (tpl .Values.primary.initdb.scriptsConfigMap $) -}} +{{- else -}} + {{- printf "%s-init-scripts" (include "postgresql.primary.fullname" .) -}} +{{- end -}} +{{- end -}} + +{/* +Return true if TLS is enabled for LDAP connection +*/}} +{{- define "postgresql.ldap.tls.enabled" -}} +{{- if and (kindIs "string" .Values.ldap.tls) (not (empty .Values.ldap.tls)) }} + {{- true -}} +{{- else if and (kindIs "map" .Values.ldap.tls) .Values.ldap.tls.enabled }} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Get the readiness probe command +*/}} +{{- define "postgresql.readinessProbeCommand" -}} +{{- $customUser := include "postgresql.username" . }} +- | +{{- if (include "postgresql.database" .) }} + exec pg_isready -U {{ default "postgres" $customUser | quote }} -d "dbname={{ include "postgresql.database" . }} {{- if .Values.tls.enabled }} sslcert={{ include "postgresql.tlsCert" . }} sslkey={{ include "postgresql.tlsCertKey" . }}{{- end }}" -h 127.0.0.1 -p {{ .Values.containerPorts.postgresql }} +{{- else }} + exec pg_isready -U {{ default "postgres" $customUser | quote }} {{- if .Values.tls.enabled }} -d "sslcert={{ include "postgresql.tlsCert" . }} sslkey={{ include "postgresql.tlsCertKey" . }}"{{- end }} -h 127.0.0.1 -p {{ .Values.containerPorts.postgresql }} +{{- end }} +{{- if contains "bitnami/" .Values.image.repository }} + [ -f /opt/bitnami/postgresql/tmp/.initialized ] || [ -f /bitnami/postgresql/.initialized ] +{{- end -}} +{{- end -}} + +{{/* +Compile all warnings into a single message, and call fail. +*/}} +{{- define "postgresql.validateValues" -}} +{{- $messages := list -}} +{{- $messages := append $messages (include "postgresql.validateValues.ldapConfigurationMethod" .) -}} +{{- $messages := append $messages (include "postgresql.validateValues.psp" .) 
-}} +{{- $messages := without $messages "" -}} +{{- $message := join "\n" $messages -}} + +{{- if $message -}} +{{- printf "\nVALUES VALIDATION:\n%s" $message | fail -}} +{{- end -}} +{{- end -}} + +{{/* +Validate values of Postgresql - If ldap.url is used then you don't need the other settings for ldap +*/}} +{{- define "postgresql.validateValues.ldapConfigurationMethod" -}} +{{- if and .Values.ldap.enabled (and (not (empty .Values.ldap.url)) (not (empty .Values.ldap.server))) }} +postgresql: ldap.url, ldap.server + You cannot set both `ldap.url` and `ldap.server` at the same time. + Please provide a unique way to configure LDAP. + More info at https://www.postgresql.org/docs/current/auth-ldap.html +{{- end -}} +{{- end -}} + +{{/* +Validate values of Postgresql - If PSP is enabled RBAC should be enabled too +*/}} +{{- define "postgresql.validateValues.psp" -}} +{{- if and .Values.psp.create (not .Values.rbac.create) }} +postgresql: psp.create, rbac.create + RBAC should be enabled if PSP is enabled in order for PSP to work. + More info at https://kubernetes.io/docs/concepts/policy/pod-security-policy/#authorizing-policies +{{- end -}} +{{- end -}} + +{{/* +Return the path to the cert file. +*/}} +{{- define "postgresql.tlsCert" -}} +{{- if .Values.tls.autoGenerated }} + {{- printf "/opt/bitnami/postgresql/certs/tls.crt" -}} +{{- else -}} + {{- required "Certificate filename is required when TLS is enabled" .Values.tls.certFilename | printf "/opt/bitnami/postgresql/certs/%s" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the path to the cert key file. +*/}} +{{- define "postgresql.tlsCertKey" -}} +{{- if .Values.tls.autoGenerated }} + {{- printf "/opt/bitnami/postgresql/certs/tls.key" -}} +{{- else -}} +{{- required "Certificate Key filename is required when TLS is enabled" .Values.tls.certKeyFilename | printf "/opt/bitnami/postgresql/certs/%s" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the path to the CA cert file. +*/}} +{{- define "postgresql.tlsCACert" -}} +{{- if .Values.tls.autoGenerated }} + {{- printf "/opt/bitnami/postgresql/certs/ca.crt" -}} +{{- else -}} + {{- printf "/opt/bitnami/postgresql/certs/%s" .Values.tls.certCAFilename -}} +{{- end -}} +{{- end -}} + +{{/* +Return the path to the CRL file. +*/}} +{{- define "postgresql.tlsCRL" -}} +{{- if .Values.tls.crlFilename -}} +{{- printf "/opt/bitnami/postgresql/certs/%s" .Values.tls.crlFilename -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if a TLS credentials secret object should be created +*/}} +{{- define "postgresql.createTlsSecret" -}} +{{- if and .Values.tls.autoGenerated (not .Values.tls.certificatesSecret) }} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Return the name of the TLS credentials secret. +*/}} +{{- define "postgresql.tlsSecretName" -}} +{{- if .Values.tls.autoGenerated }} + {{- printf "%s-crt" (include "common.names.fullname" .) -}} +{{- else -}} + {{ required "A secret containing TLS certificates is required when TLS is enabled" .Values.tls.certificatesSecret }} +{{- end -}} +{{- end -}} diff --git a/charts/modules/postgresql/templates/extra-list.yaml b/charts/modules/postgresql/templates/extra-list.yaml new file mode 100644 index 0000000..9ac65f9 --- /dev/null +++ b/charts/modules/postgresql/templates/extra-list.yaml @@ -0,0 +1,4 @@ +{{- range .Values.extraDeploy }} +--- +{{ include "common.tplvalues.render" (dict "value" . 
"context" $) }} +{{- end }} diff --git a/charts/modules/postgresql/templates/networkpolicy-egress.yaml b/charts/modules/postgresql/templates/networkpolicy-egress.yaml new file mode 100644 index 0000000..e862147 --- /dev/null +++ b/charts/modules/postgresql/templates/networkpolicy-egress.yaml @@ -0,0 +1,32 @@ +{{- if and .Values.networkPolicy.enabled (or .Values.networkPolicy.egressRules.denyConnectionsToExternal .Values.networkPolicy.egressRules.customRules) }} +apiVersion: {{ include "common.capabilities.networkPolicy.apiVersion" . }} +kind: NetworkPolicy +metadata: + name: {{ printf "%s-egress" (include "common.names.fullname" .) }} + namespace: {{ .Release.Namespace }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + podSelector: + matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }} + policyTypes: + - Egress + egress: + {{- if .Values.networkPolicy.egressRules.denyConnectionsToExternal }} + - ports: + - port: 53 + protocol: UDP + - port: 53 + protocol: TCP + - to: + - namespaceSelector: {} + {{- end }} + {{- if .Values.networkPolicy.egressRules.customRules }} + {{- include "common.tplvalues.render" (dict "value" .Values.networkPolicy.egressRules.customRules "context" $) | nindent 4 }} + {{- end }} +{{- end }} diff --git a/charts/modules/postgresql/templates/primary/configmap.yaml b/charts/modules/postgresql/templates/primary/configmap.yaml new file mode 100644 index 0000000..d654a22 --- /dev/null +++ b/charts/modules/postgresql/templates/primary/configmap.yaml @@ -0,0 +1,24 @@ +{{- if (include "postgresql.primary.createConfigmap" .) }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ printf "%s-configuration" (include "postgresql.primary.fullname" .) }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: primary + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: + {{- if .Values.primary.configuration }} + postgresql.conf: |- + {{- include "common.tplvalues.render" ( dict "value" .Values.primary.configuration "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.primary.pgHbaConfiguration }} + pg_hba.conf: | + {{- include "common.tplvalues.render" ( dict "value" .Values.primary.pgHbaConfiguration "context" $ ) | nindent 4 }} + {{- end }} +{{- end }} diff --git a/charts/modules/postgresql/templates/primary/extended-configmap.yaml b/charts/modules/postgresql/templates/primary/extended-configmap.yaml new file mode 100644 index 0000000..d129bd3 --- /dev/null +++ b/charts/modules/postgresql/templates/primary/extended-configmap.yaml @@ -0,0 +1,18 @@ +{{- if (include "postgresql.primary.createExtendedConfigmap" .) }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ printf "%s-extended-configuration" (include "postgresql.primary.fullname" .) }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + app.kubernetes.io/component: primary + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: + override.conf: |- + {{- include "common.tplvalues.render" ( dict "value" .Values.primary.extendedConfiguration "context" $ ) | nindent 4 }} +{{- end }} diff --git a/charts/modules/postgresql/templates/primary/initialization-configmap.yaml b/charts/modules/postgresql/templates/primary/initialization-configmap.yaml new file mode 100644 index 0000000..d3d26cb --- /dev/null +++ b/charts/modules/postgresql/templates/primary/initialization-configmap.yaml @@ -0,0 +1,15 @@ +{{- if and .Values.primary.initdb.scripts (not .Values.primary.initdb.scriptsConfigMap) }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ printf "%s-init-scripts" (include "postgresql.primary.fullname" .) }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: {{- include "common.tplvalues.render" (dict "value" .Values.primary.initdb.scripts "context" .) | nindent 2 }} +{{- end }} diff --git a/charts/modules/postgresql/templates/primary/metrics-configmap.yaml b/charts/modules/postgresql/templates/primary/metrics-configmap.yaml new file mode 100644 index 0000000..8ad2f35 --- /dev/null +++ b/charts/modules/postgresql/templates/primary/metrics-configmap.yaml @@ -0,0 +1,16 @@ +{{- if and .Values.metrics.enabled .Values.metrics.customMetrics }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ printf "%s-metrics" (include "postgresql.primary.fullname" .) }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: + custom-metrics.yaml: {{ toYaml .Values.metrics.customMetrics | quote }} +{{- end }} diff --git a/charts/modules/postgresql/templates/primary/metrics-svc.yaml b/charts/modules/postgresql/templates/primary/metrics-svc.yaml new file mode 100644 index 0000000..75a1b81 --- /dev/null +++ b/charts/modules/postgresql/templates/primary/metrics-svc.yaml @@ -0,0 +1,31 @@ +{{- if .Values.metrics.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ printf "%s-metrics" (include "postgresql.primary.fullname" .) }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + app.kubernetes.io/component: metrics + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + annotations: + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.metrics.service.annotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.service.annotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + type: ClusterIP + sessionAffinity: {{ .Values.metrics.service.sessionAffinity }} + {{- if .Values.metrics.service.clusterIP }} + clusterIP: {{ .Values.metrics.service.clusterIP }} + {{- end }} + ports: + - name: http-metrics + port: {{ .Values.metrics.service.ports.metrics }} + targetPort: http-metrics + selector: {{- include "common.labels.matchLabels" . | nindent 4 }} + app.kubernetes.io/component: primary +{{- end }} diff --git a/charts/modules/postgresql/templates/primary/networkpolicy.yaml b/charts/modules/postgresql/templates/primary/networkpolicy.yaml new file mode 100644 index 0000000..ce0052d --- /dev/null +++ b/charts/modules/postgresql/templates/primary/networkpolicy.yaml @@ -0,0 +1,57 @@ +{{- if and .Values.networkPolicy.enabled (or .Values.networkPolicy.metrics.enabled .Values.networkPolicy.ingressRules.primaryAccessOnlyFrom.enabled) }} +apiVersion: {{ include "common.capabilities.networkPolicy.apiVersion" . }} +kind: NetworkPolicy +metadata: + name: {{ printf "%s-ingress" (include "postgresql.primary.fullname" .) }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: primary + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + podSelector: + matchLabels: {{- include "common.labels.matchLabels" . 
| nindent 6 }} + app.kubernetes.io/component: primary + ingress: + {{- if and .Values.metrics.enabled .Values.networkPolicy.metrics.enabled (or .Values.networkPolicy.metrics.namespaceSelector .Values.networkPolicy.metrics.podSelector) }} + - from: + {{- if .Values.networkPolicy.metrics.namespaceSelector }} + - namespaceSelector: + matchLabels: {{- include "common.tplvalues.render" (dict "value" .Values.networkPolicy.metrics.namespaceSelector "context" $) | nindent 14 }} + {{- end }} + {{- if .Values.networkPolicy.metrics.podSelector }} + - podSelector: + matchLabels: {{- include "common.tplvalues.render" (dict "value" .Values.networkPolicy.metrics.podSelector "context" $) | nindent 14 }} + {{- end }} + ports: + - port: {{ .Values.metrics.containerPorts.metrics }} + {{- end }} + {{- if and .Values.networkPolicy.ingressRules.primaryAccessOnlyFrom.enabled (or .Values.networkPolicy.ingressRules.primaryAccessOnlyFrom.namespaceSelector .Values.networkPolicy.ingressRules.primaryAccessOnlyFrom.podSelector) }} + - from: + {{- if .Values.networkPolicy.ingressRules.primaryAccessOnlyFrom.namespaceSelector }} + - namespaceSelector: + matchLabels: {{- include "common.tplvalues.render" (dict "value" .Values.networkPolicy.ingressRules.primaryAccessOnlyFrom.namespaceSelector "context" $) | nindent 14 }} + {{- end }} + {{- if .Values.networkPolicy.ingressRules.primaryAccessOnlyFrom.podSelector }} + - podSelector: + matchLabels: {{- include "common.tplvalues.render" (dict "value" .Values.networkPolicy.ingressRules.primaryAccessOnlyFrom.podSelector "context" $) | nindent 14 }} + {{- end }} + ports: + - port: {{ .Values.containerPorts.postgresql }} + {{- end }} + {{- if and .Values.networkPolicy.ingressRules.primaryAccessOnlyFrom.enabled (eq .Values.architecture "replication") }} + - from: + - podSelector: + matchLabels: {{- include "common.labels.matchLabels" . | nindent 14 }} + app.kubernetes.io/component: read + ports: + - port: {{ .Values.containerPorts.postgresql }} + {{- end }} + {{- if .Values.networkPolicy.ingressRules.primaryAccessOnlyFrom.customRules }} + {{- include "common.tplvalues.render" (dict "value" .Values.networkPolicy.ingressRules.primaryAccessOnlyFrom.customRules "context" $) | nindent 4 }} + {{- end }} +{{- end }} diff --git a/charts/modules/postgresql/templates/primary/servicemonitor.yaml b/charts/modules/postgresql/templates/primary/servicemonitor.yaml new file mode 100644 index 0000000..c4a19fe --- /dev/null +++ b/charts/modules/postgresql/templates/primary/servicemonitor.yaml @@ -0,0 +1,48 @@ +{{- if and .Values.metrics.enabled .Values.metrics.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "postgresql.primary.fullname" . }} + namespace: {{ default .Release.Namespace .Values.metrics.serviceMonitor.namespace | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + app.kubernetes.io/component: metrics + {{- if .Values.metrics.serviceMonitor.labels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.labels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + {{- if .Values.metrics.serviceMonitor.jobLabel }} + jobLabel: {{ .Values.metrics.serviceMonitor.jobLabel }} + {{- end }} + selector: + matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }} + {{- if .Values.metrics.serviceMonitor.selector }} + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.serviceMonitor.selector "context" $) | nindent 6 }} + {{- end }} + app.kubernetes.io/component: metrics + endpoints: + - port: http-metrics + {{- if .Values.metrics.serviceMonitor.interval }} + interval: {{ .Values.metrics.serviceMonitor.interval }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.metrics.serviceMonitor.scrapeTimeout }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.relabelings }} + relabelings: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.relabelings "context" $) | nindent 6 }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.metricRelabelings }} + metricRelabelings: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.metricRelabelings "context" $) | nindent 6 }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.honorLabels }} + honorLabels: {{ .Values.metrics.serviceMonitor.honorLabels }} + {{- end }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace | quote }} +{{- end }} diff --git a/charts/modules/postgresql/templates/primary/statefulset.yaml b/charts/modules/postgresql/templates/primary/statefulset.yaml new file mode 100644 index 0000000..3fd77f4 --- /dev/null +++ b/charts/modules/postgresql/templates/primary/statefulset.yaml @@ -0,0 +1,634 @@ +{{- $customUser := include "postgresql.username" . }} +apiVersion: {{ include "common.capabilities.statefulset.apiVersion" . }} +kind: StatefulSet +metadata: + name: {{ include "postgresql.primary.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: primary + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.primary.labels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.primary.labels "context" $ ) | nindent 4 }} + {{- end }} + annotations: + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.primary.annotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.primary.annotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + replicas: 1 + serviceName: {{ include "postgresql.primary.svc.headless" . }} + {{- if .Values.primary.updateStrategy }} + updateStrategy: {{- toYaml .Values.primary.updateStrategy | nindent 4 }} + {{- end }} + selector: + matchLabels: {{- include "common.labels.matchLabels" . 
| nindent 6 }} + app.kubernetes.io/component: primary + template: + metadata: + name: {{ include "postgresql.primary.fullname" . }} + labels: {{- include "common.labels.standard" . | nindent 8 }} + app.kubernetes.io/component: primary + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 8 }} + {{- end }} + {{- if .Values.primary.podLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.primary.podLabels "context" $ ) | nindent 8 }} + {{- end }} + annotations: + {{- if (include "postgresql.primary.createConfigmap" .) }} + checksum/configuration: {{ include (print $.Template.BasePath "/primary/configmap.yaml") . | sha256sum }} + {{- end }} + {{- if (include "postgresql.primary.createExtendedConfigmap" .) }} + checksum/extended-configuration: {{ include (print $.Template.BasePath "/primary/extended-configmap.yaml") . | sha256sum }} + {{- end }} + {{- if .Values.primary.podAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.primary.podAnnotations "context" $ ) | nindent 8 }} + {{- end }} + spec: + {{- if .Values.primary.extraPodSpec }} + {{- include "common.tplvalues.render" (dict "value" .Values.primary.extraPodSpec "context" $) | nindent 6 }} + {{- end }} + serviceAccountName: {{ include "postgresql.serviceAccountName" . }} + {{- include "postgresql.imagePullSecrets" . | nindent 6 }} + {{- if .Values.primary.hostAliases }} + hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.primary.hostAliases "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.primary.affinity }} + affinity: {{- include "common.tplvalues.render" (dict "value" .Values.primary.affinity "context" $) | nindent 8 }} + {{- else }} + affinity: + podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.primary.podAffinityPreset "component" "primary" "context" $) | nindent 10 }} + podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.primary.podAntiAffinityPreset "component" "primary" "context" $) | nindent 10 }} + nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.primary.nodeAffinityPreset.type "key" .Values.primary.nodeAffinityPreset.key "values" .Values.primary.nodeAffinityPreset.values) | nindent 10 }} + {{- end }} + {{- if .Values.primary.nodeSelector }} + nodeSelector: {{- include "common.tplvalues.render" (dict "value" .Values.primary.nodeSelector "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.primary.tolerations }} + tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.primary.tolerations "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.primary.topologySpreadConstraints }} + topologySpreadConstraints: {{- include "common.tplvalues.render" (dict "value" .Values.primary.topologySpreadConstraints "context" .) 
| nindent 8 }} + {{- end }} + {{- if .Values.primary.priorityClassName }} + priorityClassName: {{ .Values.primary.priorityClassName }} + {{- end }} + {{- if .Values.primary.schedulerName }} + schedulerName: {{ .Values.primary.schedulerName | quote }} + {{- end }} + {{- if .Values.primary.terminationGracePeriodSeconds }} + terminationGracePeriodSeconds: {{ .Values.primary.terminationGracePeriodSeconds }} + {{- end }} + {{- if .Values.primary.podSecurityContext.enabled }} + securityContext: {{- omit .Values.primary.podSecurityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + hostNetwork: {{ .Values.primary.hostNetwork }} + hostIPC: {{ .Values.primary.hostIPC }} + initContainers: + {{- if and .Values.tls.enabled (not .Values.volumePermissions.enabled) }} + - name: copy-certs + image: {{ include "postgresql.volumePermissions.image" . }} + imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} + {{- if .Values.primary.resources }} + resources: {{- toYaml .Values.primary.resources | nindent 12 }} + {{- end }} + # We don't require a privileged container in this case + {{- if .Values.primary.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.primary.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + command: + - /bin/sh + - -ec + - | + cp /tmp/certs/* /opt/bitnami/postgresql/certs/ + chmod 600 {{ include "postgresql.tlsCertKey" . }} + volumeMounts: + - name: raw-certificates + mountPath: /tmp/certs + - name: postgresql-certificates + mountPath: /opt/bitnami/postgresql/certs + {{- else if and .Values.volumePermissions.enabled (or .Values.primary.persistence.enabled .Values.shmVolume.enabled) }} + - name: init-chmod-data + image: {{ include "postgresql.volumePermissions.image" . }} + imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} + {{- if .Values.volumePermissions.resources }} + resources: {{- toYaml .Values.volumePermissions.resources | nindent 12 }} + {{- end }} + command: + - /bin/sh + - -ec + - | + {{- if .Values.primary.persistence.enabled }} + {{- if eq ( toString ( .Values.volumePermissions.containerSecurityContext.runAsUser )) "auto" }} + chown `id -u`:`id -G | cut -d " " -f2` {{ .Values.primary.persistence.mountPath }} + {{- else }} + chown {{ .Values.primary.containerSecurityContext.runAsUser }}:{{ .Values.primary.podSecurityContext.fsGroup }} {{ .Values.primary.persistence.mountPath }} + {{- end }} + mkdir -p {{ .Values.primary.persistence.mountPath }}/data {{- if (include "postgresql.mountConfigurationCM" .) }} {{ .Values.primary.persistence.mountPath }}/conf {{- end }} + chmod 700 {{ .Values.primary.persistence.mountPath }}/data {{- if (include "postgresql.mountConfigurationCM" .) }} {{ .Values.primary.persistence.mountPath }}/conf {{- end }} + find {{ .Values.primary.persistence.mountPath }} -mindepth 1 -maxdepth 1 {{- if not (include "postgresql.mountConfigurationCM" .) 
}} -not -name "conf" {{- end }} -not -name ".snapshot" -not -name "lost+found" | \ + {{- if eq ( toString ( .Values.volumePermissions.containerSecurityContext.runAsUser )) "auto" }} + xargs -r chown -R `id -u`:`id -G | cut -d " " -f2` + {{- else }} + xargs -r chown -R {{ .Values.primary.containerSecurityContext.runAsUser }}:{{ .Values.primary.podSecurityContext.fsGroup }} + {{- end }} + {{- end }} + {{- if .Values.shmVolume.enabled }} + chmod -R 777 /dev/shm + {{- end }} + {{- if .Values.tls.enabled }} + cp /tmp/certs/* /opt/bitnami/postgresql/certs/ + {{- if eq ( toString ( .Values.volumePermissions.containerSecurityContext.runAsUser )) "auto" }} + chown -R `id -u`:`id -G | cut -d " " -f2` /opt/bitnami/postgresql/certs/ + {{- else }} + chown -R {{ .Values.primary.containerSecurityContext.runAsUser }}:{{ .Values.primary.podSecurityContext.fsGroup }} /opt/bitnami/postgresql/certs/ + {{- end }} + chmod 600 {{ include "postgresql.tlsCertKey" . }} + {{- end }} + {{- if eq ( toString ( .Values.volumePermissions.containerSecurityContext.runAsUser )) "auto" }} + securityContext: {{- omit .Values.volumePermissions.containerSecurityContext "runAsUser" | toYaml | nindent 12 }} + {{- else }} + securityContext: {{- .Values.volumePermissions.containerSecurityContext | toYaml | nindent 12 }} + {{- end }} + volumeMounts: + {{- if .Values.primary.persistence.enabled }} + - name: data + mountPath: {{ .Values.primary.persistence.mountPath }} + {{- if .Values.primary.persistence.subPath }} + subPath: {{ .Values.primary.persistence.subPath }} + {{- end }} + {{- end }} + {{- if .Values.shmVolume.enabled }} + - name: dshm + mountPath: /dev/shm + {{- end }} + {{- if .Values.tls.enabled }} + - name: raw-certificates + mountPath: /tmp/certs + - name: postgresql-certificates + mountPath: /opt/bitnami/postgresql/certs + {{- end }} + {{- end }} + {{- if .Values.primary.initContainers }} + {{- include "common.tplvalues.render" ( dict "value" .Values.primary.initContainers "context" $ ) | nindent 8 }} + {{- end }} + containers: + - name: postgresql + image: {{ include "postgresql.image" . 
}} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if .Values.primary.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.primary.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + {{- else if .Values.primary.command }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.primary.command "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- else if .Values.primary.args }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.primary.args "context" $) | nindent 12 }} + {{- end }} + env: + - name: BITNAMI_DEBUG + value: {{ ternary "true" "false" (or .Values.image.debug .Values.diagnosticMode.enabled) | quote }} + - name: POSTGRESQL_PORT_NUMBER + value: {{ .Values.containerPorts.postgresql | quote }} + - name: POSTGRESQL_VOLUME_DIR + value: {{ .Values.primary.persistence.mountPath | quote }} + {{- if .Values.primary.persistence.mountPath }} + - name: PGDATA + value: {{ .Values.postgresqlDataDir | quote }} + {{- end }} + # Authentication + {{- if and (not (empty $customUser)) (ne $customUser "postgres") }} + - name: POSTGRES_USER + value: {{ $customUser | quote }} + {{- if .Values.auth.enablePostgresUser }} + {{- if .Values.auth.usePasswordFiles }} + - name: POSTGRES_POSTGRES_PASSWORD_FILE + value: "/opt/bitnami/postgresql/secrets/postgres-password" + {{- else }} + - name: POSTGRES_POSTGRES_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "postgresql.secretName" . }} + key: {{ include "postgresql.adminPasswordKey" . }} + {{- end }} + {{- end }} + {{- end }} + {{- if .Values.auth.usePasswordFiles }} + - name: POSTGRES_PASSWORD_FILE + value: {{ printf "/opt/bitnami/postgresql/secrets/%s" (ternary "password" "postgres-password" (and (not (empty $customUser)) (ne $customUser "postgres"))) }} + {{- else }} + - name: POSTGRES_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "postgresql.secretName" . }} + key: {{ include "postgresql.userPasswordKey" . }} + {{- end }} + {{- if (include "postgresql.database" .) }} + - name: POSTGRES_DB + value: {{ (include "postgresql.database" .) | quote }} + {{- end }} + # Replication + {{- if or (eq .Values.architecture "replication") .Values.primary.standby.enabled }} + - name: POSTGRES_REPLICATION_MODE + value: {{ ternary "slave" "master" .Values.primary.standby.enabled | quote }} + - name: POSTGRES_REPLICATION_USER + value: {{ .Values.auth.replicationUsername | quote }} + {{- if .Values.auth.usePasswordFiles }} + - name: POSTGRES_REPLICATION_PASSWORD_FILE + value: "/opt/bitnami/postgresql/secrets/replication-password" + {{- else }} + - name: POSTGRES_REPLICATION_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "postgresql.secretName" . }} + key: {{ include "postgresql.replicationPasswordKey" . 
}} + {{- end }} + {{- if not (eq .Values.replication.synchronousCommit "off") }} + - name: POSTGRES_SYNCHRONOUS_COMMIT_MODE + value: {{ .Values.replication.synchronousCommit | quote }} + - name: POSTGRES_NUM_SYNCHRONOUS_REPLICAS + value: {{ .Values.replication.numSynchronousReplicas | quote }} + {{- end }} + - name: POSTGRES_CLUSTER_APP_NAME + value: {{ .Values.replication.applicationName }} + {{- end }} + # Initdb + {{- if .Values.primary.initdb.args }} + - name: POSTGRES_INITDB_ARGS + value: {{ .Values.primary.initdb.args | quote }} + {{- end }} + {{- if .Values.primary.initdb.postgresqlWalDir }} + - name: POSTGRES_INITDB_WALDIR + value: {{ .Values.primary.initdb.postgresqlWalDir | quote }} + {{- end }} + {{- if .Values.primary.initdb.user }} + - name: POSTGRESQL_INITSCRIPTS_USERNAME + value: {{ .Values.primary.initdb.user }} + {{- end }} + {{- if .Values.primary.initdb.password }} + - name: POSTGRESQL_INITSCRIPTS_PASSWORD + value: {{ .Values.primary.initdb.password | quote }} + {{- end }} + # Standby + {{- if .Values.primary.standby.enabled }} + - name: POSTGRES_MASTER_HOST + value: {{ .Values.primary.standby.primaryHost }} + - name: POSTGRES_MASTER_PORT_NUMBER + value: {{ .Values.primary.standby.primaryPort | quote }} + {{- end }} + # LDAP + - name: POSTGRESQL_ENABLE_LDAP + value: {{ ternary "yes" "no" .Values.ldap.enabled | quote }} + {{- if .Values.ldap.enabled }} + {{- if or .Values.ldap.url .Values.ldap.uri }} + - name: POSTGRESQL_LDAP_URL + value: {{ coalesce .Values.ldap.url .Values.ldap.uri }} + {{- else }} + - name: POSTGRESQL_LDAP_SERVER + value: {{ .Values.ldap.server }} + - name: POSTGRESQL_LDAP_PORT + value: {{ .Values.ldap.port | quote }} + - name: POSTGRESQL_LDAP_SCHEME + value: {{ .Values.ldap.scheme }} + {{- if (include "postgresql.ldap.tls.enabled" .) }} + - name: POSTGRESQL_LDAP_TLS + value: "1" + {{- end }} + - name: POSTGRESQL_LDAP_PREFIX + value: {{ .Values.ldap.prefix | quote }} + - name: POSTGRESQL_LDAP_SUFFIX + value: {{ .Values.ldap.suffix | quote }} + - name: POSTGRESQL_LDAP_BASE_DN + value: {{ coalesce .Values.ldap.baseDN .Values.ldap.basedn }} + - name: POSTGRESQL_LDAP_BIND_DN + value: {{ coalesce .Values.ldap.bindDN .Values.ldap.binddn}} + {{- if or (not (empty .Values.ldap.bind_password)) (not (empty .Values.ldap.bindpw)) }} + - name: POSTGRESQL_LDAP_BIND_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "postgresql.secretName" . }} + key: ldap-password + {{- end }} + - name: POSTGRESQL_LDAP_SEARCH_ATTR + value: {{ coalesce .Values.ldap.search_attr .Values.ldap.searchAttribute }} + - name: POSTGRESQL_LDAP_SEARCH_FILTER + value: {{ coalesce .Values.ldap.search_filter .Values.ldap.searchFilter }} + {{- end }} + {{- end }} + # TLS + - name: POSTGRESQL_ENABLE_TLS + value: {{ ternary "yes" "no" .Values.tls.enabled | quote }} + {{- if .Values.tls.enabled }} + - name: POSTGRESQL_TLS_PREFER_SERVER_CIPHERS + value: {{ ternary "yes" "no" .Values.tls.preferServerCiphers | quote }} + - name: POSTGRESQL_TLS_CERT_FILE + value: {{ include "postgresql.tlsCert" . }} + - name: POSTGRESQL_TLS_KEY_FILE + value: {{ include "postgresql.tlsCertKey" . }} + {{- if .Values.tls.certCAFilename }} + - name: POSTGRESQL_TLS_CA_FILE + value: {{ include "postgresql.tlsCACert" . }} + {{- end }} + {{- if .Values.tls.crlFilename }} + - name: POSTGRESQL_TLS_CRL_FILE + value: {{ include "postgresql.tlsCRL" . 
}} + {{- end }} + {{- end }} + # Audit + - name: POSTGRESQL_LOG_HOSTNAME + value: {{ .Values.audit.logHostname | quote }} + - name: POSTGRESQL_LOG_CONNECTIONS + value: {{ .Values.audit.logConnections | quote }} + - name: POSTGRESQL_LOG_DISCONNECTIONS + value: {{ .Values.audit.logDisconnections | quote }} + {{- if .Values.audit.logLinePrefix }} + - name: POSTGRESQL_LOG_LINE_PREFIX + value: {{ .Values.audit.logLinePrefix | quote }} + {{- end }} + {{- if .Values.audit.logTimezone }} + - name: POSTGRESQL_LOG_TIMEZONE + value: {{ .Values.audit.logTimezone | quote }} + {{- end }} + {{- if .Values.audit.pgAuditLog }} + - name: POSTGRESQL_PGAUDIT_LOG + value: {{ .Values.audit.pgAuditLog | quote }} + {{- end }} + - name: POSTGRESQL_PGAUDIT_LOG_CATALOG + value: {{ .Values.audit.pgAuditLogCatalog | quote }} + # Others + - name: POSTGRESQL_CLIENT_MIN_MESSAGES + value: {{ .Values.audit.clientMinMessages | quote }} + - name: POSTGRESQL_SHARED_PRELOAD_LIBRARIES + value: {{ .Values.postgresqlSharedPreloadLibraries | quote }} + {{- if .Values.primary.extraEnvVars }} + {{- include "common.tplvalues.render" (dict "value" .Values.primary.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + {{- if or .Values.primary.extraEnvVarsCM .Values.primary.extraEnvVarsSecret }} + envFrom: + {{- if .Values.primary.extraEnvVarsCM }} + - configMapRef: + name: {{ .Values.primary.extraEnvVarsCM }} + {{- end }} + {{- if .Values.primary.extraEnvVarsSecret }} + - secretRef: + name: {{ .Values.primary.extraEnvVarsSecret }} + {{- end }} + {{- end }} + ports: + - name: tcp-postgresql + containerPort: {{ .Values.containerPorts.postgresql }} + {{- if not .Values.diagnosticMode.enabled }} + {{- if .Values.primary.customStartupProbe }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.primary.customStartupProbe "context" $) | nindent 12 }} + {{- else if .Values.primary.startupProbe.enabled }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.primary.startupProbe "enabled") "context" $) | nindent 12 }} + exec: + command: + - /bin/sh + - -c + {{- if (include "postgresql.database" .) }} + - exec pg_isready -U {{ default "postgres" $customUser | quote }} -d "dbname={{ include "postgresql.database" . }} {{- if and .Values.tls.enabled .Values.tls.certCAFilename }} sslcert={{ include "postgresql.tlsCert" . }} sslkey={{ include "postgresql.tlsCertKey" . }}{{- end }}" -h 127.0.0.1 -p {{ .Values.containerPorts.postgresql }} + {{- else }} + - exec pg_isready -U {{ default "postgres" $customUser | quote }} {{- if and .Values.tls.enabled .Values.tls.certCAFilename }} -d "sslcert={{ include "postgresql.tlsCert" . }} sslkey={{ include "postgresql.tlsCertKey" . }}"{{- end }} -h 127.0.0.1 -p {{ .Values.containerPorts.postgresql }} + {{- end }} + {{- end }} + {{- if .Values.primary.customLivenessProbe }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.primary.customLivenessProbe "context" $) | nindent 12 }} + {{- else if .Values.primary.livenessProbe.enabled }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.primary.livenessProbe "enabled") "context" $) | nindent 12 }} + exec: + command: + - /bin/sh + - -c + {{- if (include "postgresql.database" .) }} + - exec pg_isready -U {{ default "postgres" $customUser | quote }} -d "dbname={{ include "postgresql.database" . }} {{- if and .Values.tls.enabled .Values.tls.certCAFilename }} sslcert={{ include "postgresql.tlsCert" . }} sslkey={{ include "postgresql.tlsCertKey" . 
}}{{- end }}" -h 127.0.0.1 -p {{ .Values.containerPorts.postgresql }} + {{- else }} + - exec pg_isready -U {{ default "postgres" $customUser | quote }} {{- if and .Values.tls.enabled .Values.tls.certCAFilename }} -d "sslcert={{ include "postgresql.tlsCert" . }} sslkey={{ include "postgresql.tlsCertKey" . }}"{{- end }} -h 127.0.0.1 -p {{ .Values.containerPorts.postgresql }} + {{- end }} + {{- end }} + {{- if .Values.primary.customReadinessProbe }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.primary.customReadinessProbe "context" $) | nindent 12 }} + {{- else if .Values.primary.readinessProbe.enabled }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.primary.readinessProbe "enabled") "context" $) | nindent 12 }} + exec: + command: + - /bin/sh + - -c + - -e + {{- include "postgresql.readinessProbeCommand" . | nindent 16 }} + {{- end }} + {{- end }} + {{- if .Values.primary.resources }} + resources: {{- toYaml .Values.primary.resources | nindent 12 }} + {{- end }} + {{- if .Values.primary.lifecycleHooks }} + lifecycle: {{- include "common.tplvalues.render" (dict "value" .Values.primary.lifecycleHooks "context" $) | nindent 12 }} + {{- end }} + volumeMounts: + {{- if or .Values.primary.initdb.scriptsConfigMap .Values.primary.initdb.scripts }} + - name: custom-init-scripts + mountPath: /docker-entrypoint-initdb.d/ + {{- end }} + {{- if .Values.primary.initdb.scriptsSecret }} + - name: custom-init-scripts-secret + mountPath: /docker-entrypoint-initdb.d/secret + {{- end }} + {{- if or .Values.primary.extendedConfiguration .Values.primary.existingExtendedConfigmap }} + - name: postgresql-extended-config + mountPath: /bitnami/postgresql/conf/conf.d/ + {{- end }} + {{- if .Values.auth.usePasswordFiles }} + - name: postgresql-password + mountPath: /opt/bitnami/postgresql/secrets/ + {{- end }} + {{- if .Values.tls.enabled }} + - name: postgresql-certificates + mountPath: /opt/bitnami/postgresql/certs + readOnly: true + {{- end }} + {{- if .Values.shmVolume.enabled }} + - name: dshm + mountPath: /dev/shm + {{- end }} + {{- if .Values.primary.persistence.enabled }} + - name: data + mountPath: {{ .Values.primary.persistence.mountPath }} + {{- if .Values.primary.persistence.subPath }} + subPath: {{ .Values.primary.persistence.subPath }} + {{- end }} + {{- end }} + {{- if or .Values.primary.configuration .Values.primary.pgHbaConfiguration .Values.primary.existingConfigmap }} + - name: postgresql-config + mountPath: /bitnami/postgresql/conf + {{- end }} + {{- if .Values.primary.extraVolumeMounts }} + {{- include "common.tplvalues.render" (dict "value" .Values.primary.extraVolumeMounts "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.metrics.enabled }} + - name: metrics + image: {{ include "postgresql.metrics.image" . 
}} + imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }} + {{- if .Values.metrics.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.metrics.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- else if .Values.metrics.customMetrics }} + args: ["--extend.query-path", "/conf/custom-metrics.yaml"] + {{- end }} + env: + {{- $database := required "In order to enable metrics you need to specify a database (.Values.auth.database or .Values.global.postgresql.auth.database)" (include "postgresql.database" .) }} + - name: DATA_SOURCE_URI + value: {{ printf "127.0.0.1:%d/%s?sslmode=disable" (int (include "postgresql.service.port" .)) $database }} + {{- if .Values.auth.usePasswordFiles }} + - name: DATA_SOURCE_PASS_FILE + value: {{ printf "/opt/bitnami/postgresql/secrets/%s" (ternary "password" "postgres-password" (and (not (empty $customUser)) (ne $customUser "postgres"))) }} + {{- else }} + - name: DATA_SOURCE_PASS + valueFrom: + secretKeyRef: + name: {{ include "postgresql.secretName" . }} + key: {{ include "postgresql.userPasswordKey" . }} + {{- end }} + - name: DATA_SOURCE_USER + value: {{ default "postgres" $customUser | quote }} + {{- if .Values.metrics.extraEnvVars }} + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + ports: + - name: http-metrics + containerPort: {{ .Values.metrics.containerPorts.metrics }} + {{- if not .Values.diagnosticMode.enabled }} + {{- if .Values.metrics.customStartupProbe }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.customStartupProbe "context" $) | nindent 12 }} + {{- else if .Values.metrics.startupProbe.enabled }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.metrics.startupProbe "enabled") "context" $) | nindent 12 }} + tcpSocket: + port: http-metrics + {{- end }} + {{- if .Values.metrics.customLivenessProbe }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.customLivenessProbe "context" $) | nindent 12 }} + {{- else if .Values.metrics.livenessProbe.enabled }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.metrics.livenessProbe "enabled") "context" $) | nindent 12 }} + httpGet: + path: / + port: http-metrics + {{- end }} + {{- if .Values.metrics.customReadinessProbe }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.customReadinessProbe "context" $) | nindent 12 }} + {{- else if .Values.metrics.readinessProbe.enabled }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.metrics.readinessProbe "enabled") "context" $) | nindent 12 }} + httpGet: + path: / + port: http-metrics + {{- end }} + {{- end }} + volumeMounts: + {{- if .Values.auth.usePasswordFiles }} + - name: postgresql-password + mountPath: /opt/bitnami/postgresql/secrets/ + {{- end }} + {{- if .Values.metrics.customMetrics }} + - name: custom-metrics + mountPath: /conf + readOnly: true + {{- end }} + {{- if .Values.metrics.resources }} + resources: {{- toYaml .Values.metrics.resources | nindent 12 }} + {{- end }} + {{- end }} + {{- if .Values.primary.sidecars }} + {{- include 
"common.tplvalues.render" ( dict "value" .Values.primary.sidecars "context" $ ) | nindent 8 }} + {{- end }} + volumes: + {{- if or .Values.primary.configuration .Values.primary.pgHbaConfiguration .Values.primary.existingConfigmap }} + - name: postgresql-config + configMap: + name: {{ include "postgresql.primary.configmapName" . }} + {{- end }} + {{- if or .Values.primary.extendedConfiguration .Values.primary.existingExtendedConfigmap }} + - name: postgresql-extended-config + configMap: + name: {{ include "postgresql.primary.extendedConfigmapName" . }} + {{- end }} + {{- if .Values.auth.usePasswordFiles }} + - name: postgresql-password + secret: + secretName: {{ include "postgresql.secretName" . }} + {{- end }} + {{- if or .Values.primary.initdb.scriptsConfigMap .Values.primary.initdb.scripts }} + - name: custom-init-scripts + configMap: + name: {{ include "postgresql.initdb.scriptsCM" . }} + {{- end }} + {{- if .Values.primary.initdb.scriptsSecret }} + - name: custom-init-scripts-secret + secret: + secretName: {{ tpl .Values.primary.initdb.scriptsSecret $ }} + {{- end }} + {{- if .Values.tls.enabled }} + - name: raw-certificates + secret: + secretName: {{ include "postgresql.tlsSecretName" . }} + - name: postgresql-certificates + emptyDir: {} + {{- end }} + {{- if .Values.primary.extraVolumes }} + {{- include "common.tplvalues.render" ( dict "value" .Values.primary.extraVolumes "context" $ ) | nindent 8 }} + {{- end }} + {{- if and .Values.metrics.enabled .Values.metrics.customMetrics }} + - name: custom-metrics + configMap: + name: {{ printf "%s-metrics" (include "common.names.fullname" .) }} + {{- end }} + {{- if .Values.shmVolume.enabled }} + - name: dshm + emptyDir: + medium: Memory + {{- if .Values.shmVolume.sizeLimit }} + sizeLimit: {{ .Values.shmVolume.sizeLimit }} + {{- end }} + {{- end }} + {{- if and .Values.primary.persistence.enabled .Values.primary.persistence.existingClaim }} + - name: data + persistentVolumeClaim: + claimName: {{ tpl .Values.primary.persistence.existingClaim $ }} + {{- else if not .Values.primary.persistence.enabled }} + - name: data + emptyDir: {} + {{- else }} + volumeClaimTemplates: + - metadata: + name: data + {{- if .Values.primary.persistence.annotations }} + annotations: {{- include "common.tplvalues.render" (dict "value" .Values.primary.persistence.annotations "context" $) | nindent 10 }} + {{- end }} + {{- if .Values.primary.persistence.labels }} + labels: {{- include "common.tplvalues.render" (dict "value" .Values.primary.persistence.labels "context" $) | nindent 10 }} + {{- end }} + spec: + accessModes: + {{- range .Values.primary.persistence.accessModes }} + - {{ . 
| quote }} + {{- end }} + {{- if .Values.primary.persistence.dataSource }} + dataSource: {{- include "common.tplvalues.render" (dict "value" .Values.primary.persistence.dataSource "context" $) | nindent 10 }} + {{- end }} + resources: + requests: + storage: {{ .Values.primary.persistence.size | quote }} + {{- if .Values.primary.persistence.selector }} + selector: {{- include "common.tplvalues.render" (dict "value" .Values.primary.persistence.selector "context" $) | nindent 10 }} + {{- end }} + {{- include "common.storage.class" (dict "persistence" .Values.primary.persistence "global" .Values.global) | nindent 8 }} + {{- end }} diff --git a/charts/modules/postgresql/templates/primary/svc-headless.yaml b/charts/modules/postgresql/templates/primary/svc-headless.yaml new file mode 100644 index 0000000..684177a --- /dev/null +++ b/charts/modules/postgresql/templates/primary/svc-headless.yaml @@ -0,0 +1,37 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "postgresql.primary.svc.headless" . }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + app.kubernetes.io/component: primary + {{- if or .Values.primary.service.headless.annotations .Values.commonAnnotations }} + annotations: + {{- if .Values.primary.service.headless.annotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.primary.service.headless.annotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} + # Use this annotation in addition to the actual publishNotReadyAddresses + # field below because the annotation will stop being respected soon but the + # field is broken in some versions of Kubernetes: + # https://github.com/kubernetes/kubernetes/issues/58662 + service.alpha.kubernetes.io/tolerate-unready-endpoints: "true" +spec: + type: ClusterIP + clusterIP: None + # We want all pods in the StatefulSet to have their addresses published for + # the sake of the other Postgresql pods even before they're ready, since they + # have to be able to talk to each other in order to become ready. + publishNotReadyAddresses: true + ports: + - name: tcp-postgresql + port: {{ template "postgresql.service.port" . }} + targetPort: tcp-postgresql + selector: {{- include "common.labels.matchLabels" . | nindent 4 }} + app.kubernetes.io/component: primary diff --git a/charts/modules/postgresql/templates/primary/svc.yaml b/charts/modules/postgresql/templates/primary/svc.yaml new file mode 100644 index 0000000..cf18480 --- /dev/null +++ b/charts/modules/postgresql/templates/primary/svc.yaml @@ -0,0 +1,51 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "postgresql.primary.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + app.kubernetes.io/component: primary + annotations: + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.primary.service.annotations }} + {{- include "common.tplvalues.render" (dict "value" .Values.primary.service.annotations "context" $) | nindent 4 }} + {{- end }} +spec: + type: {{ .Values.primary.service.type }} + {{- if or (eq .Values.primary.service.type "LoadBalancer") (eq .Values.primary.service.type "NodePort") }} + externalTrafficPolicy: {{ .Values.primary.service.externalTrafficPolicy | quote }} + {{- end }} + {{- if and (eq .Values.primary.service.type "LoadBalancer") (not (empty .Values.primary.service.loadBalancerSourceRanges)) }} + loadBalancerSourceRanges: {{ .Values.primary.service.loadBalancerSourceRanges }} + {{- end }} + {{- if and (eq .Values.primary.service.type "LoadBalancer") (not (empty .Values.primary.service.loadBalancerIP)) }} + loadBalancerIP: {{ .Values.primary.service.loadBalancerIP }} + {{- end }} + {{- if and .Values.primary.service.clusterIP (eq .Values.primary.service.type "ClusterIP") }} + clusterIP: {{ .Values.primary.service.clusterIP }} + {{- end }} + {{- if .Values.primary.service.sessionAffinity }} + sessionAffinity: {{ .Values.primary.service.sessionAffinity }} + {{- end }} + {{- if .Values.primary.service.sessionAffinityConfig }} + sessionAffinityConfig: {{- include "common.tplvalues.render" (dict "value" .Values.primary.service.sessionAffinityConfig "context" $) | nindent 4 }} + {{- end }} + ports: + - name: tcp-postgresql + port: {{ template "postgresql.service.port" . }} + targetPort: tcp-postgresql + {{- if and (or (eq .Values.primary.service.type "NodePort") (eq .Values.primary.service.type "LoadBalancer")) (not (empty .Values.primary.service.nodePorts.postgresql)) }} + nodePort: {{ .Values.primary.service.nodePorts.postgresql }} + {{- else if eq .Values.primary.service.type "ClusterIP" }} + nodePort: null + {{- end }} + {{- if .Values.primary.service.extraPorts }} + {{- include "common.tplvalues.render" (dict "value" .Values.primary.service.extraPorts "context" $) | nindent 4 }} + {{- end }} + selector: {{- include "common.labels.matchLabels" . | nindent 4 }} + app.kubernetes.io/component: primary diff --git a/charts/modules/postgresql/templates/prometheusrule.yaml b/charts/modules/postgresql/templates/prometheusrule.yaml new file mode 100644 index 0000000..24be710 --- /dev/null +++ b/charts/modules/postgresql/templates/prometheusrule.yaml @@ -0,0 +1,22 @@ +{{- if and .Values.metrics.enabled .Values.metrics.prometheusRule.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: {{ include "common.names.fullname" . }} + namespace: {{ default .Release.Namespace .Values.metrics.prometheusRule.namespace | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + app.kubernetes.io/component: metrics + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.metrics.prometheusRule.labels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.prometheusRule.labels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + groups: + - name: {{ include "common.names.fullname" . }} + rules: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.prometheusRule.rules "context" $ ) | nindent 8 }} +{{- end }} diff --git a/charts/modules/postgresql/templates/psp.yaml b/charts/modules/postgresql/templates/psp.yaml new file mode 100644 index 0000000..48d1175 --- /dev/null +++ b/charts/modules/postgresql/templates/psp.yaml @@ -0,0 +1,41 @@ +{{- $pspAvailable := (semverCompare "<1.25-0" (include "common.capabilities.kubeVersion" .)) -}} +{{- if and $pspAvailable .Values.psp.create }} +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ include "common.names.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + privileged: false + volumes: + - 'configMap' + - 'secret' + - 'persistentVolumeClaim' + - 'emptyDir' + - 'projected' + hostNetwork: false + hostIPC: false + hostPID: false + runAsUser: + rule: 'RunAsAny' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + - min: 1 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + - min: 1 + max: 65535 + readOnlyRootFilesystem: false +{{- end }} diff --git a/charts/modules/postgresql/templates/read/extended-configmap.yaml b/charts/modules/postgresql/templates/read/extended-configmap.yaml new file mode 100644 index 0000000..e329d13 --- /dev/null +++ b/charts/modules/postgresql/templates/read/extended-configmap.yaml @@ -0,0 +1,18 @@ +{{- if (include "postgresql.readReplicas.createExtendedConfigmap" .) }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ printf "%s-extended-configuration" (include "postgresql.readReplica.fullname" .) }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + app.kubernetes.io/component: read + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: + override.conf: |- + {{- include "common.tplvalues.render" ( dict "value" .Values.readReplicas.extendedConfiguration "context" $ ) | nindent 4 }} +{{- end }} diff --git a/charts/modules/postgresql/templates/read/metrics-configmap.yaml b/charts/modules/postgresql/templates/read/metrics-configmap.yaml new file mode 100644 index 0000000..b00a6ec --- /dev/null +++ b/charts/modules/postgresql/templates/read/metrics-configmap.yaml @@ -0,0 +1,16 @@ +{{- if and .Values.metrics.enabled .Values.metrics.customMetrics (eq .Values.architecture "replication") }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ printf "%s-metrics" (include "postgresql.readReplica.fullname" .) }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: + custom-metrics.yaml: {{ toYaml .Values.metrics.customMetrics | quote }} +{{- end }} diff --git a/charts/modules/postgresql/templates/read/metrics-svc.yaml b/charts/modules/postgresql/templates/read/metrics-svc.yaml new file mode 100644 index 0000000..b3e5497 --- /dev/null +++ b/charts/modules/postgresql/templates/read/metrics-svc.yaml @@ -0,0 +1,31 @@ +{{- if and .Values.metrics.enabled (eq .Values.architecture "replication") }} +apiVersion: v1 +kind: Service +metadata: + name: {{ printf "%s-metrics" (include "postgresql.readReplica.fullname" .) }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: metrics-read + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + annotations: + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.metrics.service.annotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.service.annotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + type: ClusterIP + sessionAffinity: {{ .Values.metrics.service.sessionAffinity }} + {{- if .Values.metrics.service.clusterIP }} + clusterIP: {{ .Values.metrics.service.clusterIP }} + {{- end }} + ports: + - name: http-metrics + port: {{ .Values.metrics.service.ports.metrics }} + targetPort: http-metrics + selector: {{- include "common.labels.matchLabels" . 
| nindent 4 }} + app.kubernetes.io/component: read +{{- end }} diff --git a/charts/modules/postgresql/templates/read/networkpolicy.yaml b/charts/modules/postgresql/templates/read/networkpolicy.yaml new file mode 100644 index 0000000..c969cd7 --- /dev/null +++ b/charts/modules/postgresql/templates/read/networkpolicy.yaml @@ -0,0 +1,36 @@ +{{- if and .Values.networkPolicy.enabled (eq .Values.architecture "replication") .Values.networkPolicy.ingressRules.readReplicasAccessOnlyFrom.enabled }} +apiVersion: {{ include "common.capabilities.networkPolicy.apiVersion" . }} +kind: NetworkPolicy +metadata: + name: {{ printf "%s-ingress" (include "postgresql.readReplica.fullname" .) }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: read + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + podSelector: + matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }} + app.kubernetes.io/component: read + ingress: + {{- if and .Values.networkPolicy.ingressRules.readReplicasAccessOnlyFrom.enabled (or .Values.networkPolicy.ingressRules.readReplicasAccessOnlyFrom.namespaceSelector .Values.networkPolicy.ingressRules.readReplicasAccessOnlyFrom.podSelector) }} + - from: + {{- if .Values.networkPolicy.ingressRules.readReplicasAccessOnlyFrom.namespaceSelector }} + - namespaceSelector: + matchLabels: {{- include "common.tplvalues.render" (dict "value" .Values.networkPolicy.ingressRules.readReplicasAccessOnlyFrom.namespaceSelector "context" $) | nindent 14 }} + {{- end }} + {{- if .Values.networkPolicy.ingressRules.readReplicasAccessOnlyFrom.podSelector }} + - podSelector: + matchLabels: {{- include "common.tplvalues.render" (dict "value" .Values.networkPolicy.ingressRules.readReplicasAccessOnlyFrom.podSelector "context" $) | nindent 14 }} + {{- end }} + ports: + - port: {{ .Values.containerPorts.postgresql }} + {{- end }} + {{- if .Values.networkPolicy.ingressRules.readReplicasAccessOnlyFrom.customRules }} + {{- include "common.tplvalues.render" (dict "value" .Values.networkPolicy.ingressRules.readReplicasAccessOnlyFrom.customRules "context" $) | nindent 4 }} + {{- end }} +{{- end }} diff --git a/charts/modules/postgresql/templates/read/servicemonitor.yaml b/charts/modules/postgresql/templates/read/servicemonitor.yaml new file mode 100644 index 0000000..d511d6b --- /dev/null +++ b/charts/modules/postgresql/templates/read/servicemonitor.yaml @@ -0,0 +1,48 @@ +{{- if and .Values.metrics.enabled .Values.metrics.serviceMonitor.enabled (eq .Values.architecture "replication") }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "postgresql.readReplica.fullname" . }} + namespace: {{ default .Release.Namespace .Values.metrics.serviceMonitor.namespace | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + app.kubernetes.io/component: metrics-read + {{- if .Values.metrics.serviceMonitor.labels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.labels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + {{- if .Values.metrics.serviceMonitor.jobLabel }} + jobLabel: {{ .Values.metrics.serviceMonitor.jobLabel }} + {{- end }} + selector: + matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }} + {{- if .Values.metrics.serviceMonitor.selector }} + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.serviceMonitor.selector "context" $) | nindent 6 }} + {{- end }} + app.kubernetes.io/component: metrics-read + endpoints: + - port: http-metrics + {{- if .Values.metrics.serviceMonitor.interval }} + interval: {{ .Values.metrics.serviceMonitor.interval }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.metrics.serviceMonitor.scrapeTimeout }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.relabelings }} + relabelings: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.relabelings "context" $) | nindent 6 }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.metricRelabelings }} + metricRelabelings: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.metricRelabelings "context" $) | nindent 6 }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.honorLabels }} + honorLabels: {{ .Values.metrics.serviceMonitor.honorLabels }} + {{- end }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace | quote }} +{{- end }} diff --git a/charts/modules/postgresql/templates/read/statefulset.yaml b/charts/modules/postgresql/templates/read/statefulset.yaml new file mode 100644 index 0000000..b3ff1da --- /dev/null +++ b/charts/modules/postgresql/templates/read/statefulset.yaml @@ -0,0 +1,531 @@ +{{- if eq .Values.architecture "replication" }} +{{- $customUser := include "postgresql.username" . }} +apiVersion: {{ include "common.capabilities.statefulset.apiVersion" . }} +kind: StatefulSet +metadata: + name: {{ include "postgresql.readReplica.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: read + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.readReplicas.labels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.readReplicas.labels "context" $ ) | nindent 4 }} + {{- end }} + annotations: + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.readReplicas.annotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.readReplicas.annotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + replicas: {{ .Values.readReplicas.replicaCount }} + serviceName: {{ include "postgresql.readReplica.svc.headless" . 
}} + {{- if .Values.readReplicas.updateStrategy }} + updateStrategy: {{- toYaml .Values.readReplicas.updateStrategy | nindent 4 }} + {{- end }} + selector: + matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }} + app.kubernetes.io/component: read + template: + metadata: + name: {{ include "postgresql.readReplica.fullname" . }} + labels: {{- include "common.labels.standard" . | nindent 8 }} + app.kubernetes.io/component: read + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 8 }} + {{- end }} + {{- if .Values.readReplicas.podLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.readReplicas.podLabels "context" $ ) | nindent 8 }} + {{- end }} + annotations: + {{- if (include "postgresql.readReplicas.createExtendedConfigmap" .) }} + checksum/extended-configuration: {{ include (print $.Template.BasePath "/read/extended-configmap.yaml") . | sha256sum }} + {{- end }} + {{- if .Values.readReplicas.podAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.readReplicas.podAnnotations "context" $ ) | nindent 8 }} + {{- end }} + spec: + {{- if .Values.readReplicas.extraPodSpec }} + {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.extraPodSpec "context" $) | nindent 6 }} + {{- end }} + serviceAccountName: {{ include "postgresql.serviceAccountName" . }} + {{- include "postgresql.imagePullSecrets" . | nindent 6 }} + {{- if .Values.readReplicas.hostAliases }} + hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.hostAliases "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.readReplicas.affinity }} + affinity: {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.affinity "context" $) | nindent 8 }} + {{- else }} + affinity: + podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.readReplicas.podAffinityPreset "component" "read" "context" $) | nindent 10 }} + podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.readReplicas.podAntiAffinityPreset "component" "read" "context" $) | nindent 10 }} + nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.readReplicas.nodeAffinityPreset.type "key" .Values.readReplicas.nodeAffinityPreset.key "values" .Values.readReplicas.nodeAffinityPreset.values) | nindent 10 }} + {{- end }} + {{- if .Values.readReplicas.nodeSelector }} + nodeSelector: {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.nodeSelector "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.readReplicas.tolerations }} + tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.tolerations "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.readReplicas.topologySpreadConstraints }} + topologySpreadConstraints: {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.topologySpreadConstraints "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.readReplicas.priorityClassName }} + priorityClassName: {{ .Values.readReplicas.priorityClassName }} + {{- end }} + {{- if .Values.readReplicas.schedulerName }} + schedulerName: {{ .Values.readReplicas.schedulerName | quote }} + {{- end }} + {{- if .Values.readReplicas.terminationGracePeriodSeconds }} + terminationGracePeriodSeconds: {{ .Values.readReplicas.terminationGracePeriodSeconds }} + {{- end }} + {{- if .Values.readReplicas.podSecurityContext.enabled }} + securityContext: {{- omit 
.Values.readReplicas.podSecurityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + hostNetwork: {{ .Values.readReplicas.hostNetwork }} + hostIPC: {{ .Values.readReplicas.hostIPC }} + initContainers: + {{- if and .Values.tls.enabled (not .Values.volumePermissions.enabled) }} + - name: copy-certs + image: {{ include "postgresql.volumePermissions.image" . }} + imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} + {{- if .Values.readReplicas.resources }} + resources: {{- toYaml .Values.readReplicas.resources | nindent 12 }} + {{- end }} + # We don't require a privileged container in this case + {{- if .Values.readReplicas.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.readReplicas.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + command: + - /bin/sh + - -ec + - | + cp /tmp/certs/* /opt/bitnami/postgresql/certs/ + chmod 600 {{ include "postgresql.tlsCertKey" . }} + volumeMounts: + - name: raw-certificates + mountPath: /tmp/certs + - name: postgresql-certificates + mountPath: /opt/bitnami/postgresql/certs + {{- else if and .Values.volumePermissions.enabled (or .Values.readReplicas.persistence.enabled .Values.shmVolume.enabled) }} + - name: init-chmod-data + image: {{ include "postgresql.volumePermissions.image" . }} + imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} + {{- if .Values.readReplicas.resources }} + resources: {{- toYaml .Values.readReplicas.resources | nindent 12 }} + {{- end }} + command: + - /bin/sh + - -ec + - | + {{- if .Values.readReplicas.persistence.enabled }} + {{- if eq ( toString ( .Values.volumePermissions.containerSecurityContext.runAsUser )) "auto" }} + chown `id -u`:`id -G | cut -d " " -f2` {{ .Values.readReplicas.persistence.mountPath }} + {{- else }} + chown {{ .Values.readReplicas.containerSecurityContext.runAsUser }}:{{ .Values.readReplicas.podSecurityContext.fsGroup }} {{ .Values.readReplicas.persistence.mountPath }} + {{- end }} + mkdir -p {{ .Values.readReplicas.persistence.mountPath }}/data {{- if (include "postgresql.mountConfigurationCM" .) }} {{ .Values.readReplicas.persistence.mountPath }}/conf {{- end }} + chmod 700 {{ .Values.readReplicas.persistence.mountPath }}/data {{- if (include "postgresql.mountConfigurationCM" .) }} {{ .Values.readReplicas.persistence.mountPath }}/conf {{- end }} + find {{ .Values.readReplicas.persistence.mountPath }} -mindepth 1 -maxdepth 1 {{- if not (include "postgresql.mountConfigurationCM" .) }} -not -name "conf" {{- end }} -not -name ".snapshot" -not -name "lost+found" | \ + {{- if eq ( toString ( .Values.volumePermissions.containerSecurityContext.runAsUser )) "auto" }} + xargs -r chown -R `id -u`:`id -G | cut -d " " -f2` + {{- else }} + xargs -r chown -R {{ .Values.readReplicas.containerSecurityContext.runAsUser }}:{{ .Values.readReplicas.podSecurityContext.fsGroup }} + {{- end }} + {{- end }} + {{- if .Values.shmVolume.enabled }} + chmod -R 777 /dev/shm + {{- end }} + {{- if .Values.tls.enabled }} + cp /tmp/certs/* /opt/bitnami/postgresql/certs/ + {{- if eq ( toString ( .Values.volumePermissions.containerSecurityContext.runAsUser )) "auto" }} + chown -R `id -u`:`id -G | cut -d " " -f2` /opt/bitnami/postgresql/certs/ + {{- else }} + chown -R {{ .Values.readReplicas.containerSecurityContext.runAsUser }}:{{ .Values.readReplicas.podSecurityContext.fsGroup }} /opt/bitnami/postgresql/certs/ + {{- end }} + chmod 600 {{ include "postgresql.tlsCertKey" . 
}} + {{- end }} + {{- if eq ( toString ( .Values.volumePermissions.containerSecurityContext.runAsUser )) "auto" }} + securityContext: {{- omit .Values.volumePermissions.containerSecurityContext "runAsUser" | toYaml | nindent 12 }} + {{- else }} + securityContext: {{- .Values.volumePermissions.containerSecurityContext | toYaml | nindent 12 }} + {{- end }} + volumeMounts: + {{ if .Values.readReplicas.persistence.enabled }} + - name: data + mountPath: {{ .Values.readReplicas.persistence.mountPath }} + {{- if .Values.readReplicas.persistence.subPath }} + subPath: {{ .Values.readReplicas.persistence.subPath }} + {{- end }} + {{- end }} + {{- if .Values.shmVolume.enabled }} + - name: dshm + mountPath: /dev/shm + {{- end }} + {{- if .Values.tls.enabled }} + - name: raw-certificates + mountPath: /tmp/certs + - name: postgresql-certificates + mountPath: /opt/bitnami/postgresql/certs + {{- end }} + {{- end }} + {{- if .Values.readReplicas.initContainers }} + {{- include "common.tplvalues.render" ( dict "value" .Values.readReplicas.initContainers "context" $ ) | nindent 8 }} + {{- end }} + containers: + - name: postgresql + image: {{ include "postgresql.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if .Values.readReplicas.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.readReplicas.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + {{- else if .Values.readReplicas.command }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.command "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- else if .Values.readReplicas.args }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.args "context" $) | nindent 12 }} + {{- end }} + env: + - name: BITNAMI_DEBUG + value: {{ ternary "true" "false" (or .Values.image.debug .Values.diagnosticMode.enabled) | quote }} + - name: POSTGRESQL_PORT_NUMBER + value: {{ .Values.containerPorts.postgresql | quote }} + - name: POSTGRESQL_VOLUME_DIR + value: {{ .Values.readReplicas.persistence.mountPath | quote }} + {{- if .Values.readReplicas.persistence.mountPath }} + - name: PGDATA + value: {{ .Values.postgresqlDataDir | quote }} + {{- end }} + # Authentication + {{- if and (not (empty $customUser)) (ne $customUser "postgres") .Values.auth.enablePostgresUser }} + {{- if .Values.auth.usePasswordFiles }} + - name: POSTGRES_POSTGRES_PASSWORD_FILE + value: "/opt/bitnami/postgresql/secrets/postgres-password" + {{- else }} + - name: POSTGRES_POSTGRES_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "postgresql.secretName" . }} + key: {{ include "postgresql.adminPasswordKey" . }} + {{- end }} + {{- end }} + {{- if .Values.auth.usePasswordFiles }} + - name: POSTGRES_PASSWORD_FILE + value: {{ printf "/opt/bitnami/postgresql/secrets/%s" (ternary "password" "postgres-password" (and (not (empty $customUser)) (ne $customUser "postgres"))) }} + {{- else }} + - name: POSTGRES_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "postgresql.secretName" . }} + key: {{ include "postgresql.userPasswordKey" . 
}} + {{- end }} + # Replication + - name: POSTGRES_REPLICATION_MODE + value: "slave" + - name: POSTGRES_REPLICATION_USER + value: {{ .Values.auth.replicationUsername | quote }} + {{- if .Values.auth.usePasswordFiles }} + - name: POSTGRES_REPLICATION_PASSWORD_FILE + value: "/opt/bitnami/postgresql/secrets/replication-password" + {{- else }} + - name: POSTGRES_REPLICATION_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "postgresql.secretName" . }} + key: {{ include "postgresql.replicationPasswordKey" . }} + {{- end }} + - name: POSTGRES_CLUSTER_APP_NAME + value: {{ .Values.replication.applicationName }} + - name: POSTGRES_MASTER_HOST + value: {{ include "postgresql.primary.fullname" . }} + - name: POSTGRES_MASTER_PORT_NUMBER + value: {{ include "postgresql.service.port" . | quote }} + # TLS + - name: POSTGRESQL_ENABLE_TLS + value: {{ ternary "yes" "no" .Values.tls.enabled | quote }} + {{- if .Values.tls.enabled }} + - name: POSTGRESQL_TLS_PREFER_SERVER_CIPHERS + value: {{ ternary "yes" "no" .Values.tls.preferServerCiphers | quote }} + - name: POSTGRESQL_TLS_CERT_FILE + value: {{ include "postgresql.tlsCert" . }} + - name: POSTGRESQL_TLS_KEY_FILE + value: {{ include "postgresql.tlsCertKey" . }} + {{- if .Values.tls.certCAFilename }} + - name: POSTGRESQL_TLS_CA_FILE + value: {{ include "postgresql.tlsCACert" . }} + {{- end }} + {{- if .Values.tls.crlFilename }} + - name: POSTGRESQL_TLS_CRL_FILE + value: {{ include "postgresql.tlsCRL" . }} + {{- end }} + {{- end }} + # Audit + - name: POSTGRESQL_LOG_HOSTNAME + value: {{ .Values.audit.logHostname | quote }} + - name: POSTGRESQL_LOG_CONNECTIONS + value: {{ .Values.audit.logConnections | quote }} + - name: POSTGRESQL_LOG_DISCONNECTIONS + value: {{ .Values.audit.logDisconnections | quote }} + {{- if .Values.audit.logLinePrefix }} + - name: POSTGRESQL_LOG_LINE_PREFIX + value: {{ .Values.audit.logLinePrefix | quote }} + {{- end }} + {{- if .Values.audit.logTimezone }} + - name: POSTGRESQL_LOG_TIMEZONE + value: {{ .Values.audit.logTimezone | quote }} + {{- end }} + {{- if .Values.audit.pgAuditLog }} + - name: POSTGRESQL_PGAUDIT_LOG + value: {{ .Values.audit.pgAuditLog | quote }} + {{- end }} + - name: POSTGRESQL_PGAUDIT_LOG_CATALOG + value: {{ .Values.audit.pgAuditLogCatalog | quote }} + # Others + - name: POSTGRESQL_CLIENT_MIN_MESSAGES + value: {{ .Values.audit.clientMinMessages | quote }} + - name: POSTGRESQL_SHARED_PRELOAD_LIBRARIES + value: {{ .Values.postgresqlSharedPreloadLibraries | quote }} + {{- if .Values.readReplicas.extraEnvVars }} + {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + {{- if or .Values.readReplicas.extraEnvVarsCM .Values.readReplicas.extraEnvVarsSecret }} + envFrom: + {{- if .Values.readReplicas.extraEnvVarsCM }} + - configMapRef: + name: {{ .Values.readReplicas.extraEnvVarsCM }} + {{- end }} + {{- if .Values.readReplicas.extraEnvVarsSecret }} + - secretRef: + name: {{ .Values.readReplicas.extraEnvVarsSecret }} + {{- end }} + {{- end }} + ports: + - name: tcp-postgresql + containerPort: {{ .Values.containerPorts.postgresql }} + {{- if not .Values.diagnosticMode.enabled }} + {{- if .Values.readReplicas.customStartupProbe }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.customStartupProbe "context" $) | nindent 12 }} + {{- else if .Values.readReplicas.startupProbe.enabled }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.readReplicas.startupProbe "enabled") 
"context" $) | nindent 12 }} + exec: + command: + - /bin/sh + - -c + {{- if (include "postgresql.database" .) }} + - exec pg_isready -U {{ default "postgres" $customUser| quote }} -d "dbname={{ include "postgresql.database" . }} {{- if and .Values.tls.enabled .Values.tls.certCAFilename }} sslcert={{ include "postgresql.tlsCert" . }} sslkey={{ include "postgresql.tlsCertKey" . }}{{- end }}" -h 127.0.0.1 -p {{ .Values.containerPorts.postgresql }} + {{- else }} + - exec pg_isready -U {{ default "postgres" $customUser | quote }} {{- if and .Values.tls.enabled .Values.tls.certCAFilename }} -d "sslcert={{ include "postgresql.tlsCert" . }} sslkey={{ include "postgresql.tlsCertKey" . }}"{{- end }} -h 127.0.0.1 -p {{ .Values.containerPorts.postgresql }} + {{- end }} + {{- end }} + {{- if .Values.readReplicas.customLivenessProbe }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.customLivenessProbe "context" $) | nindent 12 }} + {{- else if .Values.readReplicas.livenessProbe.enabled }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.readReplicas.livenessProbe "enabled") "context" $) | nindent 12 }} + exec: + command: + - /bin/sh + - -c + {{- if (include "postgresql.database" .) }} + - exec pg_isready -U {{ default "postgres" $customUser | quote }} -d "dbname={{ include "postgresql.database" . }} {{- if and .Values.tls.enabled .Values.tls.certCAFilename }} sslcert={{ include "postgresql.tlsCert" . }} sslkey={{ include "postgresql.tlsCertKey" . }}{{- end }}" -h 127.0.0.1 -p {{ .Values.containerPorts.postgresql }} + {{- else }} + - exec pg_isready -U {{default "postgres" $customUser | quote }} {{- if and .Values.tls.enabled .Values.tls.certCAFilename }} -d "sslcert={{ include "postgresql.tlsCert" . }} sslkey={{ include "postgresql.tlsCertKey" . }}"{{- end }} -h 127.0.0.1 -p {{ .Values.containerPorts.postgresql }} + {{- end }} + {{- end }} + {{- if .Values.readReplicas.customReadinessProbe }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.customReadinessProbe "context" $) | nindent 12 }} + {{- else if .Values.readReplicas.readinessProbe.enabled }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.readReplicas.readinessProbe "enabled") "context" $) | nindent 12 }} + exec: + command: + - /bin/sh + - -c + - -e + {{- include "postgresql.readinessProbeCommand" . 
| nindent 16 }} + {{- end }} + {{- end }} + {{- if .Values.readReplicas.resources }} + resources: {{- toYaml .Values.readReplicas.resources | nindent 12 }} + {{- end }} + {{- if .Values.readReplicas.lifecycleHooks }} + lifecycle: {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.lifecycleHooks "context" $) | nindent 12 }} + {{- end }} + volumeMounts: + {{- if .Values.auth.usePasswordFiles }} + - name: postgresql-password + mountPath: /opt/bitnami/postgresql/secrets/ + {{- end }} + {{- if .Values.readReplicas.extendedConfiguration }} + - name: postgresql-extended-config + mountPath: /bitnami/postgresql/conf/conf.d/ + {{- end }} + {{- if .Values.tls.enabled }} + - name: postgresql-certificates + mountPath: /opt/bitnami/postgresql/certs + readOnly: true + {{- end }} + {{- if .Values.shmVolume.enabled }} + - name: dshm + mountPath: /dev/shm + {{- end }} + {{- if .Values.readReplicas.persistence.enabled }} + - name: data + mountPath: {{ .Values.readReplicas.persistence.mountPath }} + {{- if .Values.readReplicas.persistence.subPath }} + subPath: {{ .Values.readReplicas.persistence.subPath }} + {{- end }} + {{- end }} + {{- if .Values.readReplicas.extraVolumeMounts }} + {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.extraVolumeMounts "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.metrics.enabled }} + - name: metrics + image: {{ include "postgresql.metrics.image" . }} + imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }} + {{- if .Values.metrics.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.metrics.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- else if .Values.metrics.customMetrics }} + args: [ "--extend.query-path", "/conf/custom-metrics.yaml" ] + {{- end }} + env: + {{- $database := required "In order to enable metrics you need to specify a database (.Values.auth.database or .Values.global.postgresql.auth.database)" (include "postgresql.database" .) }} + - name: DATA_SOURCE_URI + value: {{ printf "127.0.0.1:%d/%s?sslmode=disable" (int (include "postgresql.service.port" .)) $database }} + {{- if .Values.auth.usePasswordFiles }} + - name: DATA_SOURCE_PASS_FILE + value: {{ printf "/opt/bitnami/postgresql/secrets/%s" (ternary "password" "postgres-password" (and (not (empty $customUser)) (ne $customUser "postgres"))) }} + {{- else }} + - name: DATA_SOURCE_PASS + valueFrom: + secretKeyRef: + name: {{ include "postgresql.secretName" . }} + key: {{ include "postgresql.userPasswordKey" . 
}} + {{- end }} + - name: DATA_SOURCE_USER + value: {{ default "postgres" $customUser | quote }} + {{- if .Values.metrics.extraEnvVars }} + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + ports: + - name: http-metrics + containerPort: {{ .Values.metrics.containerPorts.metrics }} + {{- if not .Values.diagnosticMode.enabled }} + {{- if .Values.metrics.customStartupProbe }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.customStartupProbe "context" $) | nindent 12 }} + {{- else if .Values.metrics.startupProbe.enabled }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.metrics.startupProbe "enabled") "context" $) | nindent 12 }} + tcpSocket: + port: http-metrics + {{- end }} + {{- if .Values.metrics.customLivenessProbe }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.customLivenessProbe "context" $) | nindent 12 }} + {{- else if .Values.metrics.livenessProbe.enabled }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.metrics.livenessProbe "enabled") "context" $) | nindent 12 }} + httpGet: + path: / + port: http-metrics + {{- end }} + {{- if .Values.metrics.customReadinessProbe }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.customReadinessProbe "context" $) | nindent 12 }} + {{- else if .Values.metrics.readinessProbe.enabled }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.metrics.readinessProbe "enabled") "context" $) | nindent 12 }} + httpGet: + path: / + port: http-metrics + {{- end }} + {{- end }} + volumeMounts: + {{- if .Values.auth.usePasswordFiles }} + - name: postgresql-password + mountPath: /opt/bitnami/postgresql/secrets/ + {{- end }} + {{- if .Values.metrics.customMetrics }} + - name: custom-metrics + mountPath: /conf + readOnly: true + {{- end }} + {{- if .Values.metrics.resources }} + resources: {{- toYaml .Values.metrics.resources | nindent 12 }} + {{- end }} + {{- end }} + {{- if .Values.readReplicas.sidecars }} + {{- include "common.tplvalues.render" ( dict "value" .Values.readReplicas.sidecars "context" $ ) | nindent 8 }} + {{- end }} + volumes: + {{- if .Values.readReplicas.extendedConfiguration }} + - name: postgresql-extended-config + configMap: + name: {{ include "postgresql.readReplicas.extendedConfigmapName" . }} + {{- end }} + {{- if .Values.auth.usePasswordFiles }} + - name: postgresql-password + secret: + secretName: {{ include "postgresql.secretName" . }} + {{- end }} + {{- if .Values.tls.enabled }} + - name: raw-certificates + secret: + secretName: {{ include "postgresql.tlsSecretName" . }} + - name: postgresql-certificates + emptyDir: {} + {{- end }} + {{- if and .Values.metrics.enabled .Values.metrics.customMetrics }} + - name: custom-metrics + configMap: + name: {{ printf "%s-metrics" (include "common.names.fullname" .) 
}} + {{- end }} + {{- if .Values.shmVolume.enabled }} + - name: dshm + emptyDir: + medium: Memory + {{- if .Values.shmVolume.sizeLimit }} + sizeLimit: {{ .Values.shmVolume.sizeLimit }} + {{- end }} + {{- end }} + {{- if .Values.readReplicas.extraVolumes }} + {{- include "common.tplvalues.render" ( dict "value" .Values.readReplicas.extraVolumes "context" $ ) | nindent 8 }} + {{- end }} + {{- if and .Values.readReplicas.persistence.enabled .Values.readReplicas.persistence.existingClaim }} + - name: data + persistentVolumeClaim: + claimName: {{ tpl .Values.readReplicas.persistence.existingClaim $ }} + {{- else if not .Values.readReplicas.persistence.enabled }} + - name: data + emptyDir: {} + {{- else }} + volumeClaimTemplates: + - metadata: + name: data + {{- if .Values.readReplicas.persistence.annotations }} + annotations: {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.persistence.annotations "context" $) | nindent 10 }} + {{- end }} + {{- if .Values.readReplicas.persistence.labels }} + labels: {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.persistence.labels "context" $) | nindent 10 }} + {{- end }} + spec: + accessModes: + {{- range .Values.readReplicas.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + {{- if .Values.readReplicas.persistence.dataSource }} + dataSource: {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.persistence.dataSource "context" $) | nindent 10 }} + {{- end }} + resources: + requests: + storage: {{ .Values.readReplicas.persistence.size | quote }} + {{- if .Values.readReplicas.persistence.selector }} + selector: {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.persistence.selector "context" $) | nindent 10 }} + {{- end -}} + {{- include "common.storage.class" (dict "persistence" .Values.readReplicas.persistence "global" .Values.global) | nindent 8 }} + {{- end }} +{{- end }} diff --git a/charts/modules/postgresql/templates/read/svc-headless.yaml b/charts/modules/postgresql/templates/read/svc-headless.yaml new file mode 100644 index 0000000..ee8f756 --- /dev/null +++ b/charts/modules/postgresql/templates/read/svc-headless.yaml @@ -0,0 +1,39 @@ +{{- if eq .Values.architecture "replication" }} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "postgresql.readReplica.svc.headless" . }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + app.kubernetes.io/component: read + {{- if or .Values.readReplicas.service.headless.annotations .Values.commonAnnotations }} + annotations: + {{- if .Values.readReplicas.service.headless.annotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.readReplicas.service.headless.annotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} + # Use this annotation in addition to the actual publishNotReadyAddresses + # field below because the annotation will stop being respected soon but the + # field is broken in some versions of Kubernetes: + # https://github.com/kubernetes/kubernetes/issues/58662 + service.alpha.kubernetes.io/tolerate-unready-endpoints: "true" +spec: + type: ClusterIP + clusterIP: None + # We want all pods in the StatefulSet to have their addresses published for + # the sake of the other Postgresql pods even before they're ready, since they + # have to be able to talk to each other in order to become ready. + publishNotReadyAddresses: true + ports: + - name: tcp-postgresql + port: {{ include "postgresql.readReplica.service.port" . }} + targetPort: tcp-postgresql + selector: {{- include "common.labels.matchLabels" . | nindent 4 }} + app.kubernetes.io/component: read +{{- end }} diff --git a/charts/modules/postgresql/templates/read/svc.yaml b/charts/modules/postgresql/templates/read/svc.yaml new file mode 100644 index 0000000..3eece4d --- /dev/null +++ b/charts/modules/postgresql/templates/read/svc.yaml @@ -0,0 +1,53 @@ +{{- if eq .Values.architecture "replication" }} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "postgresql.readReplica.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + app.kubernetes.io/component: read + annotations: + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.readReplicas.service.annotations }} + {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.service.annotations "context" $) | nindent 4 }} + {{- end }} +spec: + type: {{ .Values.readReplicas.service.type }} + {{- if or (eq .Values.readReplicas.service.type "LoadBalancer") (eq .Values.readReplicas.service.type "NodePort") }} + externalTrafficPolicy: {{ .Values.readReplicas.service.externalTrafficPolicy | quote }} + {{- end }} + {{- if and (eq .Values.readReplicas.service.type "LoadBalancer") (not (empty .Values.readReplicas.service.loadBalancerSourceRanges)) }} + loadBalancerSourceRanges: {{ .Values.readReplicas.service.loadBalancerSourceRanges }} + {{- end }} + {{- if and (eq .Values.readReplicas.service.type "LoadBalancer") (not (empty .Values.readReplicas.service.loadBalancerIP)) }} + loadBalancerIP: {{ .Values.readReplicas.service.loadBalancerIP }} + {{- end }} + {{- if and .Values.readReplicas.service.clusterIP (eq .Values.readReplicas.service.type "ClusterIP") }} + clusterIP: {{ .Values.readReplicas.service.clusterIP }} + {{- end }} + {{- if .Values.readReplicas.service.sessionAffinity }} + sessionAffinity: {{ .Values.readReplicas.service.sessionAffinity }} + {{- end }} + {{- if .Values.readReplicas.service.sessionAffinityConfig }} + sessionAffinityConfig: {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.service.sessionAffinityConfig "context" $) | nindent 4 }} + {{- end }} + ports: + - name: tcp-postgresql + port: {{ include "postgresql.readReplica.service.port" . }} + targetPort: tcp-postgresql + {{- if and (or (eq .Values.readReplicas.service.type "NodePort") (eq .Values.readReplicas.service.type "LoadBalancer")) (not (empty .Values.readReplicas.service.nodePorts.postgresql)) }} + nodePort: {{ .Values.readReplicas.service.nodePorts.postgresql }} + {{- else if eq .Values.readReplicas.service.type "ClusterIP" }} + nodePort: null + {{- end }} + {{- if .Values.readReplicas.service.extraPorts }} + {{- include "common.tplvalues.render" (dict "value" .Values.readReplicas.service.extraPorts "context" $) | nindent 4 }} + {{- end }} + selector: {{- include "common.labels.matchLabels" . | nindent 4 }} + app.kubernetes.io/component: read +{{- end }} diff --git a/charts/modules/postgresql/templates/role.yaml b/charts/modules/postgresql/templates/role.yaml new file mode 100644 index 0000000..00f9222 --- /dev/null +++ b/charts/modules/postgresql/templates/role.yaml @@ -0,0 +1,31 @@ +{{- if .Values.rbac.create }} +kind: Role +apiVersion: {{ include "common.capabilities.rbac.apiVersion" . }} +metadata: + name: {{ include "common.names.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +# yamllint disable rule:indentation +rules: + {{- $pspAvailable := (semverCompare "<1.25-0" (include "common.capabilities.kubeVersion" .)) -}} + {{- if and $pspAvailable .Values.psp.create }} + - apiGroups: + - 'policy' + resources: + - 'podsecuritypolicies' + verbs: + - 'use' + resourceNames: + - {{ include "common.names.fullname" . }} + {{- end }} + {{- if .Values.rbac.rules }} + {{- include "common.tplvalues.render" ( dict "value" .Values.rbac.rules "context" $ ) | nindent 2 }} + {{- end }} +# yamllint enable rule:indentation +{{- end }} diff --git a/charts/modules/postgresql/templates/rolebinding.yaml b/charts/modules/postgresql/templates/rolebinding.yaml new file mode 100644 index 0000000..0311c0e --- /dev/null +++ b/charts/modules/postgresql/templates/rolebinding.yaml @@ -0,0 +1,22 @@ +{{- if .Values.rbac.create }} +kind: RoleBinding +apiVersion: {{ include "common.capabilities.rbac.apiVersion" . }} +metadata: + name: {{ include "common.names.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +roleRef: + kind: Role + name: {{ include "common.names.fullname" . }} + apiGroup: rbac.authorization.k8s.io +subjects: + - kind: ServiceAccount + name: {{ include "postgresql.serviceAccountName" . }} + namespace: {{ .Release.Namespace | quote }} +{{- end }} diff --git a/charts/modules/postgresql/templates/secrets.yaml b/charts/modules/postgresql/templates/secrets.yaml new file mode 100644 index 0000000..5f28fb3 --- /dev/null +++ b/charts/modules/postgresql/templates/secrets.yaml @@ -0,0 +1,29 @@ +{{- if (include "postgresql.createSecret" .) }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "common.names.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: Opaque +data: + {{- if .Values.auth.enablePostgresUser }} + postgres-password: {{ include "common.secrets.passwords.manage" (dict "secret" (include "common.names.fullname" .) "key" "postgres-password" "providedValues" (list "global.postgresql.auth.postgresPassword" "auth.postgresPassword") "context" $) }} + {{- end }} + {{- if not (empty (include "postgresql.username" .)) }} + password: {{ include "common.secrets.passwords.manage" (dict "secret" (include "common.names.fullname" .) 
"key" "password" "providedValues" (list "global.postgresql.auth.password" "auth.password") "context" $) }} + {{- end }} + {{- if eq .Values.architecture "replication" }} + replication-password: {{ include "common.secrets.passwords.manage" (dict "secret" (include "common.names.fullname" .) "key" "replication-password" "providedValues" (list "auth.replicationPassword") "context" $) }} + {{- end }} + # We don't auto-generate LDAP password when it's not provided as we do for other passwords + {{- if and .Values.ldap.enabled (or .Values.ldap.bind_password .Values.ldap.bindpw) }} + ldap-password: {{ coalesce .Values.ldap.bind_password .Values.ldap.bindpw | b64enc | quote }} + {{- end }} +{{- end -}} diff --git a/charts/modules/postgresql/templates/serviceaccount.yaml b/charts/modules/postgresql/templates/serviceaccount.yaml new file mode 100644 index 0000000..179f8f2 --- /dev/null +++ b/charts/modules/postgresql/templates/serviceaccount.yaml @@ -0,0 +1,19 @@ +{{- if .Values.serviceAccount.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "postgresql.serviceAccountName" . }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + annotations: + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.serviceAccount.annotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.serviceAccount.annotations "context" $ ) | nindent 4 }} + {{- end }} +automountServiceAccountToken: {{ .Values.serviceAccount.automountServiceAccountToken }} +{{- end }} diff --git a/charts/modules/postgresql/templates/tls-secrets.yaml b/charts/modules/postgresql/templates/tls-secrets.yaml new file mode 100644 index 0000000..59c5776 --- /dev/null +++ b/charts/modules/postgresql/templates/tls-secrets.yaml @@ -0,0 +1,27 @@ +{{- if (include "postgresql.createTlsSecret" . ) }} +{{- $ca := genCA "postgresql-ca" 365 }} +{{- $fullname := include "common.names.fullname" . }} +{{- $releaseNamespace := .Release.Namespace }} +{{- $clusterDomain := .Values.clusterDomain }} +{{- $primaryHeadlessServiceName := include "postgresql.primary.svc.headless" . }} +{{- $readHeadlessServiceName := include "postgresql.readReplica.svc.headless" . }} +{{- $altNames := list (printf "*.%s.%s.svc.%s" $fullname $releaseNamespace $clusterDomain) (printf "%s.%s.svc.%s" $fullname $releaseNamespace $clusterDomain) (printf "*.%s.%s.svc.%s" $primaryHeadlessServiceName $releaseNamespace $clusterDomain) (printf "%s.%s.svc.%s" $primaryHeadlessServiceName $releaseNamespace $clusterDomain) (printf "*.%s.%s.svc.%s" $readHeadlessServiceName $releaseNamespace $clusterDomain) (printf "%s.%s.svc.%s" $readHeadlessServiceName $releaseNamespace $clusterDomain) $fullname }} +{{- $crt := genSignedCert $fullname nil $altNames 365 $ca }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ printf "%s-crt" (include "common.names.fullname" .) }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: kubernetes.io/tls +data: + ca.crt: {{ $ca.Cert | b64enc | quote }} + tls.crt: {{ $crt.Cert | b64enc | quote }} + tls.key: {{ $crt.Key | b64enc | quote }} +{{- end }} diff --git a/charts/modules/postgresql/values.schema.json b/charts/modules/postgresql/values.schema.json new file mode 100644 index 0000000..fc41483 --- /dev/null +++ b/charts/modules/postgresql/values.schema.json @@ -0,0 +1,156 @@ +{ + "$schema": "http://json-schema.org/schema#", + "type": "object", + "properties": { + "architecture": { + "type": "string", + "title": "PostgreSQL architecture", + "form": true, + "description": "Allowed values: `standalone` or `replication`" + }, + "auth": { + "type": "object", + "title": "Authentication configuration", + "form": true, + "properties": { + "enablePostgresUser": { + "type": "boolean", + "title": "Enable \"postgres\" admin user", + "description": "Assign a password to the \"postgres\" admin user. Otherwise, remote access will be blocked for this user", + "form": true + }, + "postgresPassword": { + "type": "string", + "title": "Password for the \"postgres\" admin user", + "description": "Defaults to a random 10-character alphanumeric string if not set", + "form": true + }, + "database": { + "type": "string", + "title": "PostgreSQL custom database", + "description": "Name of the custom database to be created during the 1st initialization of PostgreSQL", + "form": true + }, + "username": { + "type": "string", + "title": "PostgreSQL custom user", + "description": "Name of the custom user to be created during the 1st initialization of PostgreSQL. 
This user only has permissions on the PostgreSQL custom database", + "form": true + }, + "password": { + "type": "string", + "title": "Password for the custom user to create", + "description": "Defaults to a random 10-character alphanumeric string if not set", + "form": true + }, + "replicationUsername": { + "type": "string", + "title": "PostgreSQL replication user", + "description": "Name of user used to manage replication.", + "form": true, + "hidden": { + "value": "standalone", + "path": "architecture" + } + }, + "replicationPassword": { + "type": "string", + "title": "Password for PostgreSQL replication user", + "description": "Defaults to a random 10-character alphanumeric string if not set", + "form": true, + "hidden": { + "value": "standalone", + "path": "architecture" + } + } + } + }, + "persistence": { + "type": "object", + "properties": { + "size": { + "type": "string", + "title": "Persistent Volume Size", + "form": true, + "render": "slider", + "sliderMin": 1, + "sliderMax": 100, + "sliderUnit": "Gi" + } + } + }, + "resources": { + "type": "object", + "title": "Required Resources", + "description": "Configure resource requests", + "form": true, + "properties": { + "requests": { + "type": "object", + "properties": { + "memory": { + "type": "string", + "form": true, + "render": "slider", + "title": "Memory Request", + "sliderMin": 10, + "sliderMax": 2048, + "sliderUnit": "Mi" + }, + "cpu": { + "type": "string", + "form": true, + "render": "slider", + "title": "CPU Request", + "sliderMin": 10, + "sliderMax": 2000, + "sliderUnit": "m" + } + } + } + } + }, + "replication": { + "type": "object", + "form": true, + "title": "Replication Details", + "properties": { + "enabled": { + "type": "boolean", + "title": "Enable Replication", + "form": true + }, + "readReplicas": { + "type": "integer", + "title": "read Replicas", + "form": true, + "hidden": { + "value": "standalone", + "path": "architecture" + } + } + } + }, + "volumePermissions": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean", + "form": true, + "title": "Enable Init Containers", + "description": "Change the owner of the persist volume mountpoint to RunAsUser:fsGroup" + } + } + }, + "metrics": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean", + "title": "Configure metrics exporter", + "form": true + } + } + } + } +} diff --git a/charts/modules/postgresql/values.yaml b/charts/modules/postgresql/values.yaml new file mode 100644 index 0000000..b15c8c6 --- /dev/null +++ b/charts/modules/postgresql/values.yaml @@ -0,0 +1,1411 @@ +## @section Global parameters +## Please, note that this will override the parameters, including dependencies, configured to use the global value +## +global: + ## @param global.imageRegistry Global Docker image registry + ## + imageRegistry: "" + ## @param global.imagePullSecrets Global Docker registry secret names as an array + ## e.g. 
+ ## imagePullSecrets: + ## - myRegistryKeySecretName + ## + imagePullSecrets: [] + ## @param global.storageClass Global StorageClass for Persistent Volume(s) + ## + storageClass: "" + postgresql: + ## @param global.postgresql.auth.postgresPassword Password for the "postgres" admin user (overrides `auth.postgresPassword`) + ## @param global.postgresql.auth.username Name for a custom user to create (overrides `auth.username`) + ## @param global.postgresql.auth.password Password for the custom user to create (overrides `auth.password`) + ## @param global.postgresql.auth.database Name for a custom database to create (overrides `auth.database`) + ## @param global.postgresql.auth.existingSecret Name of existing secret to use for PostgreSQL credentials (overrides `auth.existingSecret`). + ## @param global.postgresql.auth.secretKeys.adminPasswordKey Name of key in existing secret to use for PostgreSQL credentials (overrides `auth.secretKeys.adminPasswordKey`). Only used when `global.postgresql.auth.existingSecret` is set. + ## @param global.postgresql.auth.secretKeys.userPasswordKey Name of key in existing secret to use for PostgreSQL credentials (overrides `auth.secretKeys.userPasswordKey`). Only used when `global.postgresql.auth.existingSecret` is set. + ## @param global.postgresql.auth.secretKeys.replicationPasswordKey Name of key in existing secret to use for PostgreSQL credentials (overrides `auth.secretKeys.replicationPasswordKey`). Only used when `global.postgresql.auth.existingSecret` is set. + ## + auth: + postgresPassword: "" + username: "" + password: "" + database: "" + existingSecret: "" + secretKeys: + adminPasswordKey: "" + userPasswordKey: "" + replicationPasswordKey: "" + ## @param global.postgresql.service.ports.postgresql PostgreSQL service port (overrides `service.ports.postgresql`) + ## + service: + ports: + postgresql: "" + +## @section Common parameters +## + +## @param kubeVersion Override Kubernetes version +## +kubeVersion: "" +## @param nameOverride String to partially override common.names.fullname template (will maintain the release name) +## +nameOverride: "" +## @param fullnameOverride String to fully override common.names.fullname template +## +fullnameOverride: "" +## @param clusterDomain Kubernetes Cluster Domain +## +clusterDomain: cluster.local +## @param extraDeploy Array of extra objects to deploy with the release (evaluated as a template) +## +extraDeploy: [] +## @param commonLabels Add labels to all the deployed resources +## +commonLabels: {} +## @param commonAnnotations Add annotations to all the deployed resources +## +commonAnnotations: {} +## Enable diagnostic mode in the statefulset +## +diagnosticMode: + ## @param diagnosticMode.enabled Enable diagnostic mode (all probes will be disabled and the command will be overridden) + ## + enabled: false + ## @param diagnosticMode.command Command to override all containers in the statefulset + ## + command: + - sleep + ## @param diagnosticMode.args Args to override all containers in the statefulset + ## + args: + - infinity + +## @section PostgreSQL common parameters +## + +## Bitnami PostgreSQL image version +## ref: https://hub.docker.com/r/bitnami/postgresql/tags/ +## @param image.registry PostgreSQL image registry +## @param image.repository PostgreSQL image repository +## @param image.tag PostgreSQL image tag (immutable tags are recommended) +## @param image.digest PostgreSQL image digest in the way sha256:aa.... 
Please note this parameter, if set, will override the tag +## @param image.pullPolicy PostgreSQL image pull policy +## @param image.pullSecrets Specify image pull secrets +## @param image.debug Specify if debug values should be set +## +image: + registry: docker.io + repository: bitnami/postgresql + tag: 15.1.0-debian-11-r0 + digest: "" + ## Specify an imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: https://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## Example: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## Set to true if you would like to see extra information on logs + ## + debug: false +## Authentication parameters +## ref: https://github.com/bitnami/containers/tree/main/bitnami/postgresql#setting-the-root-password-on-first-run +## ref: https://github.com/bitnami/containers/tree/main/bitnami/postgresql#creating-a-database-on-first-run +## ref: https://github.com/bitnami/containers/tree/main/bitnami/postgresql#creating-a-database-user-on-first-run +## +auth: + ## @param auth.enablePostgresUser Assign a password to the "postgres" admin user. Otherwise, remote access will be blocked for this user + ## + enablePostgresUser: true + ## @param auth.postgresPassword Password for the "postgres" admin user. Ignored if `auth.existingSecret` with key `postgres-password` is provided + ## + postgresPassword: "" + ## @param auth.username Name for a custom user to create + ## + username: "" + ## @param auth.password Password for the custom user to create. Ignored if `auth.existingSecret` with key `password` is provided + ## + password: "" + ## @param auth.database Name for a custom database to create + ## + database: "" + ## @param auth.replicationUsername Name of the replication user + ## + replicationUsername: repl_user + ## @param auth.replicationPassword Password for the replication user. Ignored if `auth.existingSecret` with key `replication-password` is provided + ## + replicationPassword: "" + ## @param auth.existingSecret Name of existing secret to use for PostgreSQL credentials. `auth.postgresPassword`, `auth.password`, and `auth.replicationPassword` will be ignored and picked up from this secret. The secret might also contain the key `ldap-password` if LDAP is enabled. `ldap.bind_password` will be ignored and picked from this secret in this case. + ## + existingSecret: "" + ## @param auth.secretKeys.adminPasswordKey Name of key in existing secret to use for PostgreSQL credentials. Only used when `auth.existingSecret` is set. + ## @param auth.secretKeys.userPasswordKey Name of key in existing secret to use for PostgreSQL credentials. Only used when `auth.existingSecret` is set. + ## @param auth.secretKeys.replicationPasswordKey Name of key in existing secret to use for PostgreSQL credentials. Only used when `auth.existingSecret` is set.
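For illustration of the auth parameters documented above, a values override that reuses a pre-created credentials secret might look like the following sketch; the secret name is hypothetical, while the key names match the defaults described here.

auth:
  enablePostgresUser: true
  existingSecret: my-postgresql-credentials   # hypothetical Secret in the release namespace
  secretKeys:
    adminPasswordKey: postgres-password
    userPasswordKey: password
    replicationPasswordKey: replication-password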
+ ## + secretKeys: + adminPasswordKey: postgres-password + userPasswordKey: password + replicationPasswordKey: replication-password + ## @param auth.usePasswordFiles Mount credentials as files instead of using environment variables + ## + usePasswordFiles: false +## @param architecture PostgreSQL architecture (`standalone` or `replication`) +## +architecture: standalone +## Replication configuration +## Ignored if `architecture` is `standalone` +## +replication: + ## @param replication.synchronousCommit Set synchronous commit mode. Allowed values: `on`, `remote_apply`, `remote_write`, `local` and `off` + ## @param replication.numSynchronousReplicas Number of replicas that will have synchronous replication. Note: Cannot be greater than `readReplicas.replicaCount`. + ## ref: https://www.postgresql.org/docs/current/runtime-config-wal.html#GUC-SYNCHRONOUS-COMMIT + ## + synchronousCommit: "off" + numSynchronousReplicas: 0 + ## @param replication.applicationName Cluster application name. Useful for advanced replication settings + ## + applicationName: my_application +## @param containerPorts.postgresql PostgreSQL container port +## +containerPorts: + postgresql: 5432 +## Audit settings +## https://github.com/bitnami/containers/tree/main/bitnami/postgresql#auditing +## @param audit.logHostname Log client hostnames +## @param audit.logConnections Add client log-in operations to the log file +## @param audit.logDisconnections Add client log-out operations to the log file +## @param audit.pgAuditLog Add operations to log using the pgAudit extension +## @param audit.pgAuditLogCatalog Log catalog using pgAudit +## @param audit.clientMinMessages Message log level to share with the user +## @param audit.logLinePrefix Template for log line prefix (default if not set) +## @param audit.logTimezone Timezone for the log timestamps +## +audit: + logHostname: false + logConnections: false + logDisconnections: false + pgAuditLog: "" + pgAuditLogCatalog: "off" + clientMinMessages: error + logLinePrefix: "" + logTimezone: "" +## LDAP configuration +## @param ldap.enabled Enable LDAP support +## DEPRECATED ldap.url It will be removed in a future release; please use 'ldap.uri' instead +## @param ldap.server IP address or name of the LDAP server.
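As a sketch of the replication parameters above (the values shown are examples, not chart defaults):

architecture: replication
replication:
  synchronousCommit: "on"
  numSynchronousReplicas: 1    # must not exceed readReplicas.replicaCount
  applicationName: my_application
readReplicas:
  replicaCount: 2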
+## @param ldap.port Port number on the LDAP server to connect to +## @param ldap.prefix String to prepend to the user name when forming the DN to bind +## @param ldap.suffix String to append to the user name when forming the DN to bind +## DEPRECATED ldap.baseDN It will be removed in a future release; please use 'ldap.basedn' instead +## DEPRECATED ldap.bindDN It will be removed in a future release; please use 'ldap.binddn' instead +## DEPRECATED ldap.bind_password It will be removed in a future release; please use 'ldap.bindpw' instead +## @param ldap.basedn Root DN to begin the search for the user in +## @param ldap.binddn DN of user to bind to LDAP +## @param ldap.bindpw Password for the user to bind to LDAP +## DEPRECATED ldap.search_attr It will be removed in a future release; please use 'ldap.searchAttribute' instead +## DEPRECATED ldap.search_filter It will be removed in a future release; please use 'ldap.searchFilter' instead +## @param ldap.searchAttribute Attribute to match against the user name in the search +## @param ldap.searchFilter The search filter to use when doing search+bind authentication +## @param ldap.scheme Set to `ldaps` to use LDAPS +## DEPRECATED ldap.tls as a string is deprecated; please use 'ldap.tls.enabled' instead +## @param ldap.tls.enabled Set to true to enable TLS encryption +## +ldap: + enabled: false + server: "" + port: "" + prefix: "" + suffix: "" + basedn: "" + binddn: "" + bindpw: "" + searchAttribute: "" + searchFilter: "" + scheme: "" + tls: + enabled: false + ## @param ldap.uri LDAP URL beginning in the form `ldap[s]://host[:port]/basedn`. If provided, all the other LDAP parameters will be ignored. + ## Ref: https://www.postgresql.org/docs/current/auth-ldap.html + uri: "" +## @param postgresqlDataDir PostgreSQL data dir folder +## +postgresqlDataDir: /bitnami/postgresql/data +## @param postgresqlSharedPreloadLibraries Shared preload libraries (comma-separated list) +## +postgresqlSharedPreloadLibraries: "pgaudit" +## Start PostgreSQL pod(s) without limitations on shm memory.
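When LDAP is needed, the single `ldap.uri` form documented above is usually the simplest override; a minimal sketch with a hypothetical endpoint (per the parameter description, the other `ldap.*` fields are ignored once `uri` is set):

ldap:
  enabled: true
  uri: "ldaps://ldap.example.org:636/dc=example,dc=org"   # hypothetical LDAP endpoint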
+## By default docker and containerd (and possibly other container runtimes) limit `/dev/shm` to `64M` +## ref: https://github.com/docker-library/postgres/issues/416 +## ref: https://github.com/containerd/containerd/issues/3654 +## +shmVolume: + ## @param shmVolume.enabled Enable emptyDir volume for /dev/shm for PostgreSQL pod(s) + ## + enabled: true + ## @param shmVolume.sizeLimit Set this to enable a size limit on the shm tmpfs + ## Note: the size of the tmpfs counts against container's memory limit + ## e.g: + ## sizeLimit: 1Gi + ## + sizeLimit: "" +## TLS configuration +## +tls: + ## @param tls.enabled Enable TLS traffic support + ## + enabled: false + ## @param tls.autoGenerated Generate automatically self-signed TLS certificates + ## + autoGenerated: false + ## @param tls.preferServerCiphers Whether to use the server's TLS cipher preferences rather than the client's + ## + preferServerCiphers: true + ## @param tls.certificatesSecret Name of an existing secret that contains the certificates + ## + certificatesSecret: "" + ## @param tls.certFilename Certificate filename + ## + certFilename: "" + ## @param tls.certKeyFilename Certificate key filename + ## + certKeyFilename: "" + ## @param tls.certCAFilename CA Certificate filename + ## If provided, PostgreSQL will authenticate TLS/SSL clients by requesting them a certificate + ## ref: https://www.postgresql.org/docs/9.6/auth-methods.html + ## + certCAFilename: "" + ## @param tls.crlFilename File containing a Certificate Revocation List + ## + crlFilename: "" + +## @section PostgreSQL Primary parameters +## +primary: + ## @param primary.name Name of the primary database (eg primary, master, leader, ...) + ## + name: primary + ## @param primary.configuration PostgreSQL Primary main configuration to be injected as ConfigMap + ## ref: https://www.postgresql.org/docs/current/static/runtime-config.html + ## + configuration: "" + ## @param primary.pgHbaConfiguration PostgreSQL Primary client authentication configuration + ## ref: https://www.postgresql.org/docs/current/static/auth-pg-hba-conf.html + ## e.g:# + ## pgHbaConfiguration: |- + ## local all all trust + ## host all all localhost trust + ## host mydatabase mysuser 192.168.0.0/24 md5 + ## + pgHbaConfiguration: "" + ## @param primary.existingConfigmap Name of an existing ConfigMap with PostgreSQL Primary configuration + ## NOTE: `primary.configuration` and `primary.pgHbaConfiguration` will be ignored + ## + existingConfigmap: "" + ## @param primary.extendedConfiguration Extended PostgreSQL Primary configuration (appended to main or default configuration) + ## ref: https://github.com/bitnami/containers/tree/main/bitnami/postgresql#allow-settings-to-be-loaded-from-files-other-than-the-default-postgresqlconf + ## + extendedConfiguration: "" + ## @param primary.existingExtendedConfigmap Name of an existing ConfigMap with PostgreSQL Primary extended configuration + ## NOTE: `primary.extendedConfiguration` will be ignored + ## + existingExtendedConfigmap: "" + ## Initdb configuration + ## ref: https://github.com/bitnami/containers/tree/main/bitnami/postgresql#specifying-initdb-arguments + ## + initdb: + ## @param primary.initdb.args PostgreSQL initdb extra arguments + ## + args: "" + ## @param primary.initdb.postgresqlWalDir Specify a custom location for the PostgreSQL transaction log + ## + postgresqlWalDir: "" + ## @param primary.initdb.scripts Dictionary of initdb scripts + ## Specify dictionary of scripts to be run at first boot + ## e.g: + ## scripts: + ## my_init_script.sh: | + ## 
#!/bin/sh + ## echo "Do something." + ## + scripts: {} + ## @param primary.initdb.scriptsConfigMap ConfigMap with scripts to be run at first boot + ## NOTE: This will override `primary.initdb.scripts` + ## + scriptsConfigMap: "" + ## @param primary.initdb.scriptsSecret Secret with scripts to be run at first boot (in case it contains sensitive information) + ## NOTE: This can work along `primary.initdb.scripts` or `primary.initdb.scriptsConfigMap` + ## + scriptsSecret: "" + ## @param primary.initdb.user Specify the PostgreSQL username to execute the initdb scripts + ## + user: "" + ## @param primary.initdb.password Specify the PostgreSQL password to execute the initdb scripts + ## + password: "" + ## Configure current cluster's primary server to be the standby server in other cluster. + ## This will allow cross cluster replication and provide cross cluster high availability. + ## You will need to configure pgHbaConfiguration if you want to enable this feature with local cluster replication enabled. + ## @param primary.standby.enabled Whether to enable current cluster's primary as standby server of another cluster or not + ## @param primary.standby.primaryHost The Host of replication primary in the other cluster + ## @param primary.standby.primaryPort The Port of replication primary in the other cluster + ## + standby: + enabled: false + primaryHost: "" + primaryPort: "" + ## @param primary.extraEnvVars Array with extra environment variables to add to PostgreSQL Primary nodes + ## e.g: + ## extraEnvVars: + ## - name: FOO + ## value: "bar" + ## + extraEnvVars: [] + ## @param primary.extraEnvVarsCM Name of existing ConfigMap containing extra env vars for PostgreSQL Primary nodes + ## + extraEnvVarsCM: "" + ## @param primary.extraEnvVarsSecret Name of existing Secret containing extra env vars for PostgreSQL Primary nodes + ## + extraEnvVarsSecret: "" + ## @param primary.command Override default container command (useful when using custom images) + ## + command: [] + ## @param primary.args Override default container args (useful when using custom images) + ## + args: [] + ## Configure extra options for PostgreSQL Primary containers' liveness, readiness and startup probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes + ## @param primary.livenessProbe.enabled Enable livenessProbe on PostgreSQL Primary containers + ## @param primary.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe + ## @param primary.livenessProbe.periodSeconds Period seconds for livenessProbe + ## @param primary.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe + ## @param primary.livenessProbe.failureThreshold Failure threshold for livenessProbe + ## @param primary.livenessProbe.successThreshold Success threshold for livenessProbe + ## + livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + ## @param primary.readinessProbe.enabled Enable readinessProbe on PostgreSQL Primary containers + ## @param primary.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe + ## @param primary.readinessProbe.periodSeconds Period seconds for readinessProbe + ## @param primary.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe + ## @param primary.readinessProbe.failureThreshold Failure threshold for readinessProbe + ## @param primary.readinessProbe.successThreshold Success threshold for readinessProbe + ## 
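The probe timings documented above can be tuned per field, since Helm merges override values with the chart defaults; for example, on slow storage one might adjust only a couple of settings, as in this sketch with illustrative numbers:

primary:
  livenessProbe:
    initialDelaySeconds: 60
  readinessProbe:
    timeoutSeconds: 10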
+ readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + ## @param primary.startupProbe.enabled Enable startupProbe on PostgreSQL Primary containers + ## @param primary.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe + ## @param primary.startupProbe.periodSeconds Period seconds for startupProbe + ## @param primary.startupProbe.timeoutSeconds Timeout seconds for startupProbe + ## @param primary.startupProbe.failureThreshold Failure threshold for startupProbe + ## @param primary.startupProbe.successThreshold Success threshold for startupProbe + ## + startupProbe: + enabled: false + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 1 + failureThreshold: 15 + successThreshold: 1 + ## @param primary.customLivenessProbe Custom livenessProbe that overrides the default one + ## + customLivenessProbe: {} + ## @param primary.customReadinessProbe Custom readinessProbe that overrides the default one + ## + customReadinessProbe: {} + ## @param primary.customStartupProbe Custom startupProbe that overrides the default one + ## + customStartupProbe: {} + ## @param primary.lifecycleHooks for the PostgreSQL Primary container to automate configuration before or after startup + ## + lifecycleHooks: {} + ## PostgreSQL Primary resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## @param primary.resources.limits The resources limits for the PostgreSQL Primary containers + ## @param primary.resources.requests.memory The requested memory for the PostgreSQL Primary containers + ## @param primary.resources.requests.cpu The requested cpu for the PostgreSQL Primary containers + ## + resources: + limits: {} + requests: + memory: 256Mi + cpu: 250m + ## Pod Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + ## @param primary.podSecurityContext.enabled Enable security context + ## @param primary.podSecurityContext.fsGroup Group ID for the pod + ## + podSecurityContext: + enabled: true + fsGroup: 1001 + ## Container Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + ## @param primary.containerSecurityContext.enabled Enable container security context + ## @param primary.containerSecurityContext.runAsUser User ID for the container + ## + containerSecurityContext: + enabled: true + runAsUser: 1001 + ## @param primary.hostAliases PostgreSQL primary pods host aliases + ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ + ## + hostAliases: [] + ## @param primary.hostNetwork Specify if host network should be enabled for PostgreSQL pod (postgresql primary) + ## + hostNetwork: false + ## @param primary.hostIPC Specify if host IPC should be enabled for PostgreSQL pod (postgresql primary) + ## + hostIPC: false + ## @param primary.labels Map of labels to add to the statefulset (postgresql primary) + ## + labels: {} + ## @param primary.annotations Annotations for PostgreSQL primary pods + ## + annotations: {} + ## @param primary.podLabels Map of labels to add to the pods (postgresql primary) + ## + podLabels: {} + ## @param primary.podAnnotations Map of annotations to add to the pods (postgresql primary) + ## + podAnnotations: {} + ## @param primary.podAffinityPreset PostgreSQL primary pod affinity preset. Ignored if `primary.affinity` is set. 
Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAffinityPreset: "" + ## @param primary.podAntiAffinityPreset PostgreSQL primary pod anti-affinity preset. Ignored if `primary.affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAntiAffinityPreset: soft + ## PostgreSQL Primary node affinity preset + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity + ## + nodeAffinityPreset: + ## @param primary.nodeAffinityPreset.type PostgreSQL primary node affinity preset type. Ignored if `primary.affinity` is set. Allowed values: `soft` or `hard` + ## + type: "" + ## @param primary.nodeAffinityPreset.key PostgreSQL primary node label key to match Ignored if `primary.affinity` is set. + ## E.g. + ## key: "kubernetes.io/e2e-az-name" + ## + key: "" + ## @param primary.nodeAffinityPreset.values PostgreSQL primary node label values to match. Ignored if `primary.affinity` is set. + ## E.g. + ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: [] + ## @param primary.affinity Affinity for PostgreSQL primary pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## Note: primary.podAffinityPreset, primary.podAntiAffinityPreset, and primary.nodeAffinityPreset will be ignored when it's set + ## + affinity: {} + ## @param primary.nodeSelector Node labels for PostgreSQL primary pods assignment + ## ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + ## @param primary.tolerations Tolerations for PostgreSQL primary pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + ## @param primary.topologySpreadConstraints Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template + ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#spread-constraints-for-pods + ## + topologySpreadConstraints: [] + ## @param primary.priorityClassName Priority Class to use for each pod (postgresql primary) + ## + priorityClassName: "" + ## @param primary.schedulerName Use an alternate scheduler, e.g. "stork". 
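As an example of the scheduling parameters above, primary pods can be steered to labelled nodes with the affinity presets; a sketch reusing the placeholder label key and values from the comments above:

primary:
  podAntiAffinityPreset: soft
  nodeAffinityPreset:
    type: hard
    key: "kubernetes.io/e2e-az-name"   # placeholder label key
    values:
      - e2e-az1
      - e2e-az2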
+ ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + schedulerName: "" + ## @param primary.terminationGracePeriodSeconds Seconds PostgreSQL primary pod needs to terminate gracefully + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods + ## + terminationGracePeriodSeconds: "" + ## @param primary.updateStrategy.type PostgreSQL Primary statefulset strategy type + ## @param primary.updateStrategy.rollingUpdate PostgreSQL Primary statefulset rolling update configuration parameters + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies + ## + updateStrategy: + type: RollingUpdate + rollingUpdate: {} + ## @param primary.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the PostgreSQL Primary container(s) + ## + extraVolumeMounts: [] + ## @param primary.extraVolumes Optionally specify extra list of additional volumes for the PostgreSQL Primary pod(s) + ## + extraVolumes: [] + ## @param primary.sidecars Add additional sidecar containers to the PostgreSQL Primary pod(s) + ## For example: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + ## + sidecars: [] + ## @param primary.initContainers Add additional init containers to the PostgreSQL Primary pod(s) + ## Example + ## + ## initContainers: + ## - name: do-something + ## image: busybox + ## command: ['do', 'something'] + ## + initContainers: [] + ## @param primary.extraPodSpec Optionally specify extra PodSpec for the PostgreSQL Primary pod(s) + ## + extraPodSpec: {} + ## PostgreSQL Primary service configuration + ## + service: + ## @param primary.service.type Kubernetes Service type + ## + type: ClusterIP + ## @param primary.service.ports.postgresql PostgreSQL service port + ## + ports: + postgresql: 5432 + ## Node ports to expose + ## NOTE: choose port between <30000-32767> + ## @param primary.service.nodePorts.postgresql Node port for PostgreSQL + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + nodePorts: + postgresql: "" + ## @param primary.service.clusterIP Static clusterIP or None for headless services + ## e.g: + ## clusterIP: None + ## + clusterIP: "" + ## @param primary.service.annotations Annotations for PostgreSQL primary service + ## + annotations: {} + ## @param primary.service.loadBalancerIP Load balancer IP if service type is `LoadBalancer` + ## Set the LoadBalancer service type to internal only + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + loadBalancerIP: "" + ## @param primary.service.externalTrafficPolicy Enable client source IP preservation + ## ref https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Cluster + ## @param primary.service.loadBalancerSourceRanges Addresses that are allowed when service is LoadBalancer + ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## + ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## @param primary.service.extraPorts Extra ports to expose in the PostgreSQL primary service + ## + extraPorts: [] + ## @param primary.service.sessionAffinity Session Affinity for Kubernetes service, can be "None" or "ClientIP" + ## 
If "ClientIP", consecutive client requests will be directed to the same Pod + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + ## + sessionAffinity: None + ## @param primary.service.sessionAffinityConfig Additional settings for the sessionAffinity + ## sessionAffinityConfig: + ## clientIP: + ## timeoutSeconds: 300 + ## + sessionAffinityConfig: {} + ## Headless service properties + ## + headless: + ## @param primary.service.headless.annotations Additional custom annotations for headless PostgreSQL primary service + ## + annotations: {} + ## PostgreSQL Primary persistence configuration + ## + persistence: + ## @param primary.persistence.enabled Enable PostgreSQL Primary data persistence using PVC + ## + enabled: true + ## @param primary.persistence.existingClaim Name of an existing PVC to use + ## + existingClaim: "" + ## @param primary.persistence.mountPath The path the volume will be mounted at + ## Note: useful when using custom PostgreSQL images + ## + mountPath: /bitnami/postgresql + ## @param primary.persistence.subPath The subdirectory of the volume to mount to + ## Useful in dev environments and one PV for multiple services + ## + subPath: "" + ## @param primary.persistence.storageClass PVC Storage Class for PostgreSQL Primary data volume + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + storageClass: "" + ## @param primary.persistence.accessModes PVC Access Mode for PostgreSQL volume + ## + accessModes: + - ReadWriteOnce + ## @param primary.persistence.size PVC Storage Request for PostgreSQL volume + ## + size: 8Gi + ## @param primary.persistence.annotations Annotations for the PVC + ## + annotations: {} + ## @param primary.persistence.labels Labels for the PVC + ## + labels: {} + ## @param primary.persistence.selector Selector to match an existing Persistent Volume (this value is evaluated as a template) + ## selector: + ## matchLabels: + ## app: my-app + ## + selector: {} + ## @param primary.persistence.dataSource Custom PVC data source + ## + dataSource: {} + +## @section PostgreSQL read only replica parameters (only used when `architecture` is set to `replication`) +## +readReplicas: + ## @param readReplicas.name Name of the read replicas database (eg secondary, slave, ...) 
+ ## + name: read + ## @param readReplicas.replicaCount Number of PostgreSQL read only replicas + ## + replicaCount: 1 + ## @param readReplicas.extendedConfiguration Extended PostgreSQL read only replicas configuration (appended to main or default configuration) + ## ref: https://github.com/bitnami/containers/tree/main/bitnami/postgresql#allow-settings-to-be-loaded-from-files-other-than-the-default-postgresqlconf + ## + extendedConfiguration: "" + ## @param readReplicas.extraEnvVars Array with extra environment variables to add to PostgreSQL read only nodes + ## e.g: + ## extraEnvVars: + ## - name: FOO + ## value: "bar" + ## + extraEnvVars: [] + ## @param readReplicas.extraEnvVarsCM Name of existing ConfigMap containing extra env vars for PostgreSQL read only nodes + ## + extraEnvVarsCM: "" + ## @param readReplicas.extraEnvVarsSecret Name of existing Secret containing extra env vars for PostgreSQL read only nodes + ## + extraEnvVarsSecret: "" + ## @param readReplicas.command Override default container command (useful when using custom images) + ## + command: [] + ## @param readReplicas.args Override default container args (useful when using custom images) + ## + args: [] + ## Configure extra options for PostgreSQL read only containers' liveness, readiness and startup probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes + ## @param readReplicas.livenessProbe.enabled Enable livenessProbe on PostgreSQL read only containers + ## @param readReplicas.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe + ## @param readReplicas.livenessProbe.periodSeconds Period seconds for livenessProbe + ## @param readReplicas.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe + ## @param readReplicas.livenessProbe.failureThreshold Failure threshold for livenessProbe + ## @param readReplicas.livenessProbe.successThreshold Success threshold for livenessProbe + ## + livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + ## @param readReplicas.readinessProbe.enabled Enable readinessProbe on PostgreSQL read only containers + ## @param readReplicas.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe + ## @param readReplicas.readinessProbe.periodSeconds Period seconds for readinessProbe + ## @param readReplicas.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe + ## @param readReplicas.readinessProbe.failureThreshold Failure threshold for readinessProbe + ## @param readReplicas.readinessProbe.successThreshold Success threshold for readinessProbe + ## + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + ## @param readReplicas.startupProbe.enabled Enable startupProbe on PostgreSQL read only containers + ## @param readReplicas.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe + ## @param readReplicas.startupProbe.periodSeconds Period seconds for startupProbe + ## @param readReplicas.startupProbe.timeoutSeconds Timeout seconds for startupProbe + ## @param readReplicas.startupProbe.failureThreshold Failure threshold for startupProbe + ## @param readReplicas.startupProbe.successThreshold Success threshold for startupProbe + ## + startupProbe: + enabled: false + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 1 + failureThreshold: 15 + successThreshold: 1 + ## 
@param readReplicas.customLivenessProbe Custom livenessProbe that overrides the default one + ## + customLivenessProbe: {} + ## @param readReplicas.customReadinessProbe Custom readinessProbe that overrides the default one + ## + customReadinessProbe: {} + ## @param readReplicas.customStartupProbe Custom startupProbe that overrides the default one + ## + customStartupProbe: {} + ## @param readReplicas.lifecycleHooks for the PostgreSQL read only container to automate configuration before or after startup + ## + lifecycleHooks: {} + ## PostgreSQL read only resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## @param readReplicas.resources.limits The resources limits for the PostgreSQL read only containers + ## @param readReplicas.resources.requests.memory The requested memory for the PostgreSQL read only containers + ## @param readReplicas.resources.requests.cpu The requested cpu for the PostgreSQL read only containers + ## + resources: + limits: {} + requests: + memory: 256Mi + cpu: 250m + ## Pod Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + ## @param readReplicas.podSecurityContext.enabled Enable security context + ## @param readReplicas.podSecurityContext.fsGroup Group ID for the pod + ## + podSecurityContext: + enabled: true + fsGroup: 1001 + ## Container Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + ## @param readReplicas.containerSecurityContext.enabled Enable container security context + ## @param readReplicas.containerSecurityContext.runAsUser User ID for the container + ## + containerSecurityContext: + enabled: true + runAsUser: 1001 + ## @param readReplicas.hostAliases PostgreSQL read only pods host aliases + ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ + ## + hostAliases: [] + ## @param readReplicas.hostNetwork Specify if host network should be enabled for PostgreSQL pod (PostgreSQL read only) + ## + hostNetwork: false + ## @param readReplicas.hostIPC Specify if host IPC should be enabled for PostgreSQL pod (postgresql primary) + ## + hostIPC: false + ## @param readReplicas.labels Map of labels to add to the statefulset (PostgreSQL read only) + ## + labels: {} + ## @param readReplicas.annotations Annotations for PostgreSQL read only pods + ## + annotations: {} + ## @param readReplicas.podLabels Map of labels to add to the pods (PostgreSQL read only) + ## + podLabels: {} + ## @param readReplicas.podAnnotations Map of annotations to add to the pods (PostgreSQL read only) + ## + podAnnotations: {} + ## @param readReplicas.podAffinityPreset PostgreSQL read only pod affinity preset. Ignored if `primary.affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAffinityPreset: "" + ## @param readReplicas.podAntiAffinityPreset PostgreSQL read only pod anti-affinity preset. Ignored if `primary.affinity` is set. 
Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAntiAffinityPreset: soft + ## PostgreSQL read only node affinity preset + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity + ## + nodeAffinityPreset: + ## @param readReplicas.nodeAffinityPreset.type PostgreSQL read only node affinity preset type. Ignored if `primary.affinity` is set. Allowed values: `soft` or `hard` + ## + type: "" + ## @param readReplicas.nodeAffinityPreset.key PostgreSQL read only node label key to match Ignored if `primary.affinity` is set. + ## E.g. + ## key: "kubernetes.io/e2e-az-name" + ## + key: "" + ## @param readReplicas.nodeAffinityPreset.values PostgreSQL read only node label values to match. Ignored if `primary.affinity` is set. + ## E.g. + ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: [] + ## @param readReplicas.affinity Affinity for PostgreSQL read only pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## Note: primary.podAffinityPreset, primary.podAntiAffinityPreset, and primary.nodeAffinityPreset will be ignored when it's set + ## + affinity: {} + ## @param readReplicas.nodeSelector Node labels for PostgreSQL read only pods assignment + ## ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + ## @param readReplicas.tolerations Tolerations for PostgreSQL read only pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + ## @param readReplicas.topologySpreadConstraints Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template + ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#spread-constraints-for-pods + ## + topologySpreadConstraints: [] + ## @param readReplicas.priorityClassName Priority Class to use for each pod (PostgreSQL read only) + ## + priorityClassName: "" + ## @param readReplicas.schedulerName Use an alternate scheduler, e.g. "stork". 
+ ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + schedulerName: "" + ## @param readReplicas.terminationGracePeriodSeconds Seconds PostgreSQL read only pod needs to terminate gracefully + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods + ## + terminationGracePeriodSeconds: "" + ## @param readReplicas.updateStrategy.type PostgreSQL read only statefulset strategy type + ## @param readReplicas.updateStrategy.rollingUpdate PostgreSQL read only statefulset rolling update configuration parameters + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies + ## + updateStrategy: + type: RollingUpdate + rollingUpdate: {} + ## @param readReplicas.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the PostgreSQL read only container(s) + ## + extraVolumeMounts: [] + ## @param readReplicas.extraVolumes Optionally specify extra list of additional volumes for the PostgreSQL read only pod(s) + ## + extraVolumes: [] + ## @param readReplicas.sidecars Add additional sidecar containers to the PostgreSQL read only pod(s) + ## For example: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + ## + sidecars: [] + ## @param readReplicas.initContainers Add additional init containers to the PostgreSQL read only pod(s) + ## Example + ## + ## initContainers: + ## - name: do-something + ## image: busybox + ## command: ['do', 'something'] + ## + initContainers: [] + ## @param readReplicas.extraPodSpec Optionally specify extra PodSpec for the PostgreSQL read only pod(s) + ## + extraPodSpec: {} + ## PostgreSQL read only service configuration + ## + service: + ## @param readReplicas.service.type Kubernetes Service type + ## + type: ClusterIP + ## @param readReplicas.service.ports.postgresql PostgreSQL service port + ## + ports: + postgresql: 5432 + ## Node ports to expose + ## NOTE: choose port between <30000-32767> + ## @param readReplicas.service.nodePorts.postgresql Node port for PostgreSQL + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + nodePorts: + postgresql: "" + ## @param readReplicas.service.clusterIP Static clusterIP or None for headless services + ## e.g: + ## clusterIP: None + ## + clusterIP: "" + ## @param readReplicas.service.annotations Annotations for PostgreSQL read only service + ## + annotations: {} + ## @param readReplicas.service.loadBalancerIP Load balancer IP if service type is `LoadBalancer` + ## Set the LoadBalancer service type to internal only + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + loadBalancerIP: "" + ## @param readReplicas.service.externalTrafficPolicy Enable client source IP preservation + ## ref https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Cluster + ## @param readReplicas.service.loadBalancerSourceRanges Addresses that are allowed when service is LoadBalancer + ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## + ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## @param readReplicas.service.extraPorts Extra ports to expose in the PostgreSQL read only service + ## + extraPorts: [] + ## @param 
readReplicas.service.sessionAffinity Session Affinity for Kubernetes service, can be "None" or "ClientIP" + ## If "ClientIP", consecutive client requests will be directed to the same Pod + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + ## + sessionAffinity: None + ## @param readReplicas.service.sessionAffinityConfig Additional settings for the sessionAffinity + ## sessionAffinityConfig: + ## clientIP: + ## timeoutSeconds: 300 + ## + sessionAffinityConfig: {} + ## Headless service properties + ## + headless: + ## @param readReplicas.service.headless.annotations Additional custom annotations for headless PostgreSQL read only service + ## + annotations: {} + ## PostgreSQL read only persistence configuration + ## + persistence: + ## @param readReplicas.persistence.enabled Enable PostgreSQL read only data persistence using PVC + ## + enabled: true + ## @param readReplicas.persistence.existingClaim Name of an existing PVC to use + ## + existingClaim: "" + ## @param readReplicas.persistence.mountPath The path the volume will be mounted at + ## Note: useful when using custom PostgreSQL images + ## + mountPath: /bitnami/postgresql + ## @param readReplicas.persistence.subPath The subdirectory of the volume to mount to + ## Useful in dev environments and one PV for multiple services + ## + subPath: "" + ## @param readReplicas.persistence.storageClass PVC Storage Class for PostgreSQL read only data volume + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + storageClass: "" + ## @param readReplicas.persistence.accessModes PVC Access Mode for PostgreSQL volume + ## + accessModes: + - ReadWriteOnce + ## @param readReplicas.persistence.size PVC Storage Request for PostgreSQL volume + ## + size: 8Gi + ## @param readReplicas.persistence.annotations Annotations for the PVC + ## + annotations: {} + ## @param readReplicas.persistence.labels Labels for the PVC + ## + labels: {} + ## @param readReplicas.persistence.selector Selector to match an existing Persistent Volume (this value is evaluated as a template) + ## selector: + ## matchLabels: + ## app: my-app + ## + selector: {} + ## @param readReplicas.persistence.dataSource Custom PVC data source + ## + dataSource: {} + +## @section NetworkPolicy parameters + +## Add networkpolicies +## +networkPolicy: + ## @param networkPolicy.enabled Enable network policies + ## + enabled: false + ## @param networkPolicy.metrics.enabled Enable network policies for metrics (prometheus) + ## @param networkPolicy.metrics.namespaceSelector [object] Monitoring namespace selector labels. These labels will be used to identify the prometheus' namespace. + ## @param networkPolicy.metrics.podSelector [object] Monitoring pod selector labels. These labels will be used to identify the Prometheus pods. + ## + metrics: + enabled: false + ## e.g: + ## namespaceSelector: + ## label: monitoring + ## + namespaceSelector: {} + ## e.g: + ## podSelector: + ## label: monitoring + ## + podSelector: {} + ## Ingress Rules + ## + ingressRules: + ## @param networkPolicy.ingressRules.primaryAccessOnlyFrom.enabled Enable ingress rule that makes PostgreSQL primary node only accessible from a particular origin. 
+ ## @param networkPolicy.ingressRules.primaryAccessOnlyFrom.namespaceSelector [object] Namespace selector label that is allowed to access the PostgreSQL primary node. This label will be used to identify the allowed namespace(s). + ## @param networkPolicy.ingressRules.primaryAccessOnlyFrom.podSelector [object] Pods selector label that is allowed to access the PostgreSQL primary node. This label will be used to identify the allowed pod(s). + ## @param networkPolicy.ingressRules.primaryAccessOnlyFrom.customRules [object] Custom network policy for the PostgreSQL primary node. + ## + primaryAccessOnlyFrom: + enabled: false + ## e.g: + ## namespaceSelector: + ## label: ingress + ## + namespaceSelector: {} + ## e.g: + ## podSelector: + ## label: access + ## + podSelector: {} + ## custom ingress rules + ## e.g: + ## customRules: + ## - from: + ## - namespaceSelector: + ## matchLabels: + ## label: example + customRules: {} + ## @param networkPolicy.ingressRules.readReplicasAccessOnlyFrom.enabled Enable ingress rule that makes PostgreSQL read-only nodes only accessible from a particular origin. + ## @param networkPolicy.ingressRules.readReplicasAccessOnlyFrom.namespaceSelector [object] Namespace selector label that is allowed to access the PostgreSQL read-only nodes. This label will be used to identify the allowed namespace(s). + ## @param networkPolicy.ingressRules.readReplicasAccessOnlyFrom.podSelector [object] Pods selector label that is allowed to access the PostgreSQL read-only nodes. This label will be used to identify the allowed pod(s). + ## @param networkPolicy.ingressRules.readReplicasAccessOnlyFrom.customRules [object] Custom network policy for the PostgreSQL read-only nodes. + ## + readReplicasAccessOnlyFrom: + enabled: false + ## e.g: + ## namespaceSelector: + ## label: ingress + ## + namespaceSelector: {} + ## e.g: + ## podSelector: + ## label: access + ## + podSelector: {} + ## custom ingress rules + ## e.g: + ## customRules: + ## - from: + ## - namespaceSelector: + ## matchLabels: + ## label: example + customRules: {} + ## @param networkPolicy.egressRules.denyConnectionsToExternal Enable egress rule that denies outgoing traffic outside the cluster, except for DNS (port 53). + ## @param networkPolicy.egressRules.customRules [object] Custom network policy rule + ## + egressRules: + # Deny connections to external. This is not compatible with an external database. + denyConnectionsToExternal: false + ## Additional custom egress rules + ## e.g: + ## customRules: + ## - to: + ## - namespaceSelector: + ## matchLabels: + ## label: example + customRules: {} + +## @section Volume Permissions parameters + +## Init containers parameters: +## volumePermissions: Change the owner and group of the persistent volume(s) mountpoint(s) to 'runAsUser:fsGroup' on each node +## +volumePermissions: + ## @param volumePermissions.enabled Enable init container that changes the owner and group of the persistent volume + ## + enabled: false + ## @param volumePermissions.image.registry Init container volume-permissions image registry + ## @param volumePermissions.image.repository Init container volume-permissions image repository + ## @param volumePermissions.image.tag Init container volume-permissions image tag (immutable tags are recommended) + ## @param volumePermissions.image.digest Init container volume-permissions image digest in the way sha256:aa....
Please note this parameter, if set, will override the tag + ## @param volumePermissions.image.pullPolicy Init container volume-permissions image pull policy + ## @param volumePermissions.image.pullSecrets Init container volume-permissions image pull secrets + ## + image: + registry: docker.io + repository: bitnami/bitnami-shell + tag: 11-debian-11-r50 + digest: "" + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## Example: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## Init container resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## @param volumePermissions.resources.limits Init container volume-permissions resource limits + ## @param volumePermissions.resources.requests Init container volume-permissions resource requests + ## + resources: + limits: {} + requests: {} + ## Init container' Security Context + ## Note: the chown of the data folder is done to containerSecurityContext.runAsUser + ## and not the below volumePermissions.containerSecurityContext.runAsUser + ## @param volumePermissions.containerSecurityContext.runAsUser User ID for the init container + ## + containerSecurityContext: + runAsUser: 0 + +## @section Other Parameters + +## Service account for PostgreSQL to use. +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ +## +serviceAccount: + ## @param serviceAccount.create Enable creation of ServiceAccount for PostgreSQL pod + ## + create: false + ## @param serviceAccount.name The name of the ServiceAccount to use. + ## If not set and create is true, a name is generated using the common.names.fullname template + ## + name: "" + ## @param serviceAccount.automountServiceAccountToken Allows auto mount of ServiceAccountToken on the serviceAccount created + ## Can be set to false if pods using this serviceAccount do not need to use K8s API + ## + automountServiceAccountToken: true + ## @param serviceAccount.annotations Additional custom annotations for the ServiceAccount + ## + annotations: {} +## Creates role for ServiceAccount +## @param rbac.create Create Role and RoleBinding (required for PSP to work) +## +rbac: + create: false + ## @param rbac.rules Custom RBAC rules to set + ## e.g: + ## rules: + ## - apiGroups: + ## - "" + ## resources: + ## - pods + ## verbs: + ## - get + ## - list + ## + rules: [] +## Pod Security Policy +## ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ +## @param psp.create Whether to create a PodSecurityPolicy. WARNING: PodSecurityPolicy is deprecated in Kubernetes v1.21 or later, unavailable in v1.25 or later +## +psp: + create: false + +## @section Metrics Parameters + +metrics: + ## @param metrics.enabled Start a prometheus exporter + ## + enabled: false + ## @param metrics.image.registry PostgreSQL Prometheus Exporter image registry + ## @param metrics.image.repository PostgreSQL Prometheus Exporter image repository + ## @param metrics.image.tag PostgreSQL Prometheus Exporter image tag (immutable tags are recommended) + ## @param metrics.image.digest PostgreSQL image digest in the way sha256:aa.... 
Please note this parameter, if set, will override the tag + ## @param metrics.image.pullPolicy PostgreSQL Prometheus Exporter image pull policy + ## @param metrics.image.pullSecrets Specify image pull secrets + ## + image: + registry: docker.io + repository: bitnami/postgres-exporter + tag: 0.11.1-debian-11-r27 + digest: "" + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## Example: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## @param metrics.customMetrics Define additional custom metrics + ## ref: https://github.com/wrouesnel/postgres_exporter#adding-new-metrics-via-a-config-file + ## customMetrics: + ## pg_database: + ## query: "SELECT d.datname AS name, CASE WHEN pg_catalog.has_database_privilege(d.datname, 'CONNECT') THEN pg_catalog.pg_database_size(d.datname) ELSE 0 END AS size_bytes FROM pg_catalog.pg_database d where datname not in ('template0', 'template1', 'postgres')" + ## metrics: + ## - name: + ## usage: "LABEL" + ## description: "Name of the database" + ## - size_bytes: + ## usage: "GAUGE" + ## description: "Size of the database in bytes" + ## + customMetrics: {} + ## @param metrics.extraEnvVars Extra environment variables to add to PostgreSQL Prometheus exporter + ## see: https://github.com/wrouesnel/postgres_exporter#environment-variables + ## For example: + ## extraEnvVars: + ## - name: PG_EXPORTER_DISABLE_DEFAULT_METRICS + ## value: "true" + ## + extraEnvVars: [] + ## PostgreSQL Prometheus exporter containers' Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container + ## @param metrics.containerSecurityContext.enabled Enable PostgreSQL Prometheus exporter containers' Security Context + ## @param metrics.containerSecurityContext.runAsUser Set PostgreSQL Prometheus exporter containers' Security Context runAsUser + ## @param metrics.containerSecurityContext.runAsNonRoot Set PostgreSQL Prometheus exporter containers' Security Context runAsNonRoot + ## + containerSecurityContext: + enabled: true + runAsUser: 1001 + runAsNonRoot: true + ## Configure extra options for PostgreSQL Prometheus exporter containers' liveness, readiness and startup probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes + ## @param metrics.livenessProbe.enabled Enable livenessProbe on PostgreSQL Prometheus exporter containers + ## @param metrics.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe + ## @param metrics.livenessProbe.periodSeconds Period seconds for livenessProbe + ## @param metrics.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe + ## @param metrics.livenessProbe.failureThreshold Failure threshold for livenessProbe + ## @param metrics.livenessProbe.successThreshold Success threshold for livenessProbe + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + ## @param metrics.readinessProbe.enabled Enable readinessProbe on PostgreSQL Prometheus exporter containers + ## @param metrics.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe + ## @param metrics.readinessProbe.periodSeconds Period seconds for readinessProbe + ## @param metrics.readinessProbe.timeoutSeconds 
Timeout seconds for readinessProbe + ## @param metrics.readinessProbe.failureThreshold Failure threshold for readinessProbe + ## @param metrics.readinessProbe.successThreshold Success threshold for readinessProbe + ## + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + ## @param metrics.startupProbe.enabled Enable startupProbe on PostgreSQL Prometheus exporter containers + ## @param metrics.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe + ## @param metrics.startupProbe.periodSeconds Period seconds for startupProbe + ## @param metrics.startupProbe.timeoutSeconds Timeout seconds for startupProbe + ## @param metrics.startupProbe.failureThreshold Failure threshold for startupProbe + ## @param metrics.startupProbe.successThreshold Success threshold for startupProbe + ## + startupProbe: + enabled: false + initialDelaySeconds: 10 + periodSeconds: 10 + timeoutSeconds: 1 + failureThreshold: 15 + successThreshold: 1 + ## @param metrics.customLivenessProbe Custom livenessProbe that overrides the default one + ## + customLivenessProbe: {} + ## @param metrics.customReadinessProbe Custom readinessProbe that overrides the default one + ## + customReadinessProbe: {} + ## @param metrics.customStartupProbe Custom startupProbe that overrides the default one + ## + customStartupProbe: {} + ## @param metrics.containerPorts.metrics PostgreSQL Prometheus exporter metrics container port + ## + containerPorts: + metrics: 9187 + ## PostgreSQL Prometheus exporter resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## @param metrics.resources.limits The resources limits for the PostgreSQL Prometheus exporter container + ## @param metrics.resources.requests The requested resources for the PostgreSQL Prometheus exporter container + ## + resources: + limits: {} + requests: {} + ## Service configuration + ## + service: + ## @param metrics.service.ports.metrics PostgreSQL Prometheus Exporter service port + ## + ports: + metrics: 9187 + ## @param metrics.service.clusterIP Static clusterIP or None for headless services + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#choosing-your-own-ip-address + ## + clusterIP: "" + ## @param metrics.service.sessionAffinity Control where client requests go, to the same pod or round-robin + ## Values: ClientIP or None + ## ref: https://kubernetes.io/docs/user-guide/services/ + ## + sessionAffinity: None + ## @param metrics.service.annotations [object] Annotations for Prometheus to auto-discover the metrics endpoint + ## + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "{{ .Values.metrics.service.ports.metrics }}" + ## Prometheus Operator ServiceMonitor configuration + ## + serviceMonitor: + ## @param metrics.serviceMonitor.enabled Create ServiceMonitor Resource for scraping metrics using Prometheus Operator + ## + enabled: false + ## @param metrics.serviceMonitor.namespace Namespace for the ServiceMonitor Resource (defaults to the Release Namespace) + ## + namespace: "" + ## @param metrics.serviceMonitor.interval Interval at which metrics should be scraped. 
+ ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint + ## + interval: "" + ## @param metrics.serviceMonitor.scrapeTimeout Timeout after which the scrape is ended + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint + ## + scrapeTimeout: "" + ## @param metrics.serviceMonitor.labels Additional labels that can be used so ServiceMonitor will be discovered by Prometheus + ## + labels: {} + ## @param metrics.serviceMonitor.selector Prometheus instance selector labels + ## ref: https://github.com/bitnami/charts/tree/main/bitnami/prometheus-operator#prometheus-configuration + ## + selector: {} + ## @param metrics.serviceMonitor.relabelings RelabelConfigs to apply to samples before scraping + ## + relabelings: [] + ## @param metrics.serviceMonitor.metricRelabelings MetricRelabelConfigs to apply to samples before ingestion + ## + metricRelabelings: [] + ## @param metrics.serviceMonitor.honorLabels Specify honorLabels parameter to add the scrape endpoint + ## + honorLabels: false + ## @param metrics.serviceMonitor.jobLabel The name of the label on the target service to use as the job name in prometheus. + ## + jobLabel: "" + ## Custom PrometheusRule to be defined + ## The value is evaluated as a template, so, for example, the value can depend on .Release or .Chart + ## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions + ## + prometheusRule: + ## @param metrics.prometheusRule.enabled Create a PrometheusRule for Prometheus Operator + ## + enabled: false + ## @param metrics.prometheusRule.namespace Namespace for the PrometheusRule Resource (defaults to the Release Namespace) + ## + namespace: "" + ## @param metrics.prometheusRule.labels Additional labels that can be used so PrometheusRule will be discovered by Prometheus + ## + labels: {} + ## @param metrics.prometheusRule.rules PrometheusRule definitions + ## Make sure to constraint the rules to the current postgresql service. + ## rules: + ## - alert: HugeReplicationLag + ## expr: pg_replication_lag{service="{{ printf "%s-metrics" (include "common.names.fullname" .) }}"} / 3600 > 1 + ## for: 1m + ## labels: + ## severity: critical + ## annotations: + ## description: replication for {{ include "common.names.fullname" . }} PostgreSQL is lagging by {{ "{{ $value }}" }} hour(s). + ## summary: PostgreSQL replication is lagging by {{ "{{ $value }}" }} hour(s). + ## + rules: [] diff --git a/charts/modules/rabbitmq/.helmignore b/charts/modules/rabbitmq/.helmignore new file mode 100644 index 0000000..f0c1319 --- /dev/null +++ b/charts/modules/rabbitmq/.helmignore @@ -0,0 +1,21 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/charts/modules/rabbitmq/Chart.lock b/charts/modules/rabbitmq/Chart.lock new file mode 100644 index 0000000..5a19c2c --- /dev/null +++ b/charts/modules/rabbitmq/Chart.lock @@ -0,0 +1,6 @@ +dependencies: +- name: common + repository: https://charts.bitnami.com/bitnami + version: 2.2.1 +digest: sha256:6c67cfa9945bf608209d4e2ca8f17079fca4770907c7902d984187ab5b21811e +generated: "2022-11-28T23:36:12.245721472Z" diff --git a/charts/modules/rabbitmq/Chart.yaml b/charts/modules/rabbitmq/Chart.yaml new file mode 100644 index 0000000..36731f4 --- /dev/null +++ b/charts/modules/rabbitmq/Chart.yaml @@ -0,0 +1,26 @@ +annotations: + category: Infrastructure +apiVersion: v2 +appVersion: 3.11.4 +dependencies: +- name: common + repository: https://charts.bitnami.com/bitnami + tags: + - bitnami-common + version: 2.x.x +description: RabbitMQ is an open source general-purpose message broker that is designed + for consistent, highly-available messaging scenarios (both synchronous and asynchronous). +home: https://github.com/bitnami/charts/tree/main/bitnami/rabbitmq +icon: https://bitnami.com/assets/stacks/rabbitmq/img/rabbitmq-stack-220x234.png +keywords: +- rabbitmq +- message queue +- AMQP +maintainers: +- name: Bitnami + url: https://github.com/bitnami/charts +name: rabbitmq +sources: +- https://github.com/bitnami/containers/tree/main/bitnami/rabbitmq +- https://www.rabbitmq.com +version: 11.1.5 diff --git a/charts/modules/rabbitmq/README.md b/charts/modules/rabbitmq/README.md new file mode 100644 index 0000000..da036dd --- /dev/null +++ b/charts/modules/rabbitmq/README.md @@ -0,0 +1,718 @@ + + +# RabbitMQ packaged by Bitnami + +RabbitMQ is an open source general-purpose message broker that is designed for consistent, highly-available messaging scenarios (both synchronous and asynchronous). + +[Overview of RabbitMQ](https://www.rabbitmq.com) + +Trademarks: This software listing is packaged by Bitnami. The respective trademarks mentioned in the offering are owned by the respective companies, and use of them does not imply any affiliation or endorsement. + +## TL;DR + +```bash +$ helm repo add my-repo https://charts.bitnami.com/bitnami +$ helm install my-release my-repo/rabbitmq +``` + +## Introduction + +This chart bootstraps a [RabbitMQ](https://github.com/bitnami/containers/tree/main/bitnami/rabbitmq) deployment on a [Kubernetes](https://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +Bitnami charts can be used with [Kubeapps](https://kubeapps.dev/) for deployment and management of Helm Charts in clusters. + +## Prerequisites + +- Kubernetes 1.19+ +- Helm 3.2.0+ +- PV provisioner support in the underlying infrastructure + +## Installing the Chart + +To install the chart with the release name `my-release`: + +```bash +$ helm repo add my-repo https://charts.bitnami.com/bitnami +$ helm install my-release my-repo/rabbitmq +``` + +The command deploys RabbitMQ on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation. + +> **Tip**: List all releases using `helm list` + +## Uninstalling the Chart + +To uninstall/delete the `my-release` deployment: + +```bash +$ helm delete my-release +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. 
+ +## Parameters + +### Global parameters + +| Name | Description | Value | +| ------------------------- | ----------------------------------------------- | ----- | +| `global.imageRegistry` | Global Docker image registry | `""` | +| `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]` | +| `global.storageClass` | Global StorageClass for Persistent Volume(s) | `""` | + + +### RabbitMQ Image parameters + +| Name | Description | Value | +| ------------------- | -------------------------------------------------------------------------------------------------------- | --------------------- | +| `image.registry` | RabbitMQ image registry | `docker.io` | +| `image.repository` | RabbitMQ image repository | `bitnami/rabbitmq` | +| `image.tag` | RabbitMQ image tag (immutable tags are recommended) | `3.11.4-debian-11-r0` | +| `image.digest` | RabbitMQ image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` | +| `image.pullPolicy` | RabbitMQ image pull policy | `IfNotPresent` | +| `image.pullSecrets` | Specify docker-registry secret names as an array | `[]` | +| `image.debug` | Set to true if you would like to see extra information on logs | `false` | + + +### Common parameters + +| Name | Description | Value | +| ---------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------- | +| `nameOverride` | String to partially override rabbitmq.fullname template (will maintain the release name) | `""` | +| `fullnameOverride` | String to fully override rabbitmq.fullname template | `""` | +| `namespaceOverride` | String to fully override common.names.namespace | `""` | +| `kubeVersion` | Force target Kubernetes version (using Helm capabilities if not set) | `""` | +| `clusterDomain` | Kubernetes Cluster Domain | `cluster.local` | +| `extraDeploy` | Array of extra objects to deploy with the release | `[]` | +| `commonAnnotations` | Annotations to add to all deployed objects | `{}` | +| `commonLabels` | Labels to add to all deployed objects | `{}` | +| `diagnosticMode.enabled` | Enable diagnostic mode (all probes will be disabled and the command will be overridden) | `false` | +| `diagnosticMode.command` | Command to override all containers in the deployment | `["sleep"]` | +| `diagnosticMode.args` | Args to override all containers in the deployment | `["infinity"]` | +| `hostAliases` | Deployment pod host aliases | `[]` | +| `dnsPolicy` | DNS Policy for pod | `""` | +| `dnsConfig` | DNS Configuration pod | `{}` | +| `auth.username` | RabbitMQ application username | `user` | +| `auth.password` | RabbitMQ application password | `""` | +| `auth.securePassword` | Whether to set the RabbitMQ password securely. This is incompatible with loading external RabbitMQ definitions and 'true' when not setting the auth.password parameter. 
| `true` | +| `auth.existingPasswordSecret` | Existing secret with RabbitMQ credentials (must contain a value for `rabbitmq-password` key) | `""` | +| `auth.erlangCookie` | Erlang cookie to determine whether different nodes are allowed to communicate with each other | `""` | +| `auth.existingErlangSecret` | Existing secret with RabbitMQ Erlang cookie (must contain a value for `rabbitmq-erlang-cookie` key) | `""` | +| `auth.tls.enabled` | Enable TLS support on RabbitMQ | `false` | +| `auth.tls.autoGenerated` | Generate automatically self-signed TLS certificates | `false` | +| `auth.tls.failIfNoPeerCert` | When set to true, TLS connection will be rejected if client fails to provide a certificate | `true` | +| `auth.tls.sslOptionsVerify` | Should [peer verification](https://www.rabbitmq.com/ssl.html#peer-verification) be enabled? | `verify_peer` | +| `auth.tls.caCertificate` | Certificate Authority (CA) bundle content | `""` | +| `auth.tls.serverCertificate` | Server certificate content | `""` | +| `auth.tls.serverKey` | Server private key content | `""` | +| `auth.tls.existingSecret` | Existing secret with certificate content to RabbitMQ credentials | `""` | +| `auth.tls.existingSecretFullChain` | Whether or not the existing secret contains the full chain in the certificate (`tls.crt`). Will be used in place of `ca.cert` if `true`. | `false` | +| `logs` | Path of the RabbitMQ server's Erlang log file. Value for the `RABBITMQ_LOGS` environment variable | `-` | +| `ulimitNofiles` | RabbitMQ Max File Descriptors | `65536` | +| `maxAvailableSchedulers` | RabbitMQ maximum available scheduler threads | `""` | +| `onlineSchedulers` | RabbitMQ online scheduler threads | `""` | +| `memoryHighWatermark.enabled` | Enable configuring Memory high watermark on RabbitMQ | `false` | +| `memoryHighWatermark.type` | Memory high watermark type. Either `absolute` or `relative` | `relative` | +| `memoryHighWatermark.value` | Memory high watermark value | `0.4` | +| `plugins` | List of default plugins to enable (should only be altered to remove defaults; for additional plugins use `extraPlugins`) | `rabbitmq_management rabbitmq_peer_discovery_k8s` | +| `communityPlugins` | List of Community plugins (URLs) to be downloaded during container initialization | `""` | +| `extraPlugins` | Extra plugins to enable (single string containing a space-separated list) | `rabbitmq_auth_backend_ldap` | +| `clustering.enabled` | Enable RabbitMQ clustering | `true` | +| `clustering.addressType` | Switch clustering mode. Either `ip` or `hostname` | `hostname` | +| `clustering.rebalance` | Rebalance master for queues in cluster when new replica is created | `false` | +| `clustering.forceBoot` | Force boot of an unexpectedly shut down cluster (in an unexpected order). | `false` | +| `clustering.partitionHandling` | Switch Partition Handling Strategy. 
Either `autoheal` or `pause-minority` or `pause-if-all-down` or `ignore` | `autoheal` | +| `loadDefinition.enabled` | Enable loading a RabbitMQ definitions file to configure RabbitMQ | `false` | +| `loadDefinition.file` | Name of the definitions file | `/app/load_definition.json` | +| `loadDefinition.existingSecret` | Existing secret with the load definitions file | `""` | +| `command` | Override default container command (useful when using custom images) | `[]` | +| `args` | Override default container args (useful when using custom images) | `[]` | +| `lifecycleHooks` | Overwrite livecycle for the RabbitMQ container(s) to automate configuration before or after startup | `{}` | +| `terminationGracePeriodSeconds` | Default duration in seconds k8s waits for container to exit before sending kill signal. | `120` | +| `extraEnvVars` | Extra environment variables to add to RabbitMQ pods | `[]` | +| `extraEnvVarsCM` | Name of existing ConfigMap containing extra environment variables | `""` | +| `extraEnvVarsSecret` | Name of existing Secret containing extra environment variables (in case of sensitive data) | `""` | +| `containerPorts.amqp` | | `5672` | +| `containerPorts.amqpTls` | | `5671` | +| `containerPorts.dist` | | `25672` | +| `containerPorts.manager` | | `15672` | +| `containerPorts.epmd` | | `4369` | +| `containerPorts.metrics` | | `9419` | +| `initScripts` | Dictionary of init scripts. Evaluated as a template. | `{}` | +| `initScriptsCM` | ConfigMap with the init scripts. Evaluated as a template. | `""` | +| `initScriptsSecret` | Secret containing `/docker-entrypoint-initdb.d` scripts to be executed at initialization time that contain sensitive data. Evaluated as a template. | `""` | +| `extraContainerPorts` | Extra ports to be included in container spec, primarily informational | `[]` | +| `configuration` | RabbitMQ Configuration file content: required cluster configuration | `""` | +| `extraConfiguration` | Configuration file content: extra configuration to be appended to RabbitMQ configuration | `""` | +| `advancedConfiguration` | Configuration file content: advanced configuration | `""` | +| `ldap.enabled` | Enable LDAP support | `false` | +| `ldap.uri` | LDAP connection string. | `""` | +| `ldap.servers` | List of LDAP servers hostnames. This is valid only if ldap.uri is not set | `[]` | +| `ldap.port` | LDAP servers port. This is valid only if ldap.uri is not set | `""` | +| `ldap.userDnPattern` | Pattern used to translate the provided username into a value to be used for the LDAP bind. | `""` | +| `ldap.binddn` | DN of the account used to search in the LDAP server. | `""` | +| `ldap.bindpw` | Password for binddn account. | `""` | +| `ldap.basedn` | Base DN path where binddn account will search for the users. | `""` | +| `ldap.uidField` | Field used to match with the user name (uid, samAccountName, cn, etc). It matches with 'dn_lookup_attribute' in RabbitMQ configuration | `""` | +| `ldap.uidField` | Field used to match with the user name (uid, samAccountName, cn, etc). It matches with 'dn_lookup_attribute' in RabbitMQ configuration | `""` | +| `ldap.authorisationEnabled` | Enable LDAP authorisation. Please set 'advancedConfiguration' with tag, topic, resources and vhost mappings | `false` | +| `ldap.tls.enabled` | Enabled TLS configuration. | `false` | +| `ldap.tls.startTls` | Use STARTTLS instead of LDAPS. | `false` | +| `ldap.tls.skipVerify` | Skip any SSL verification (hostanames or certificates) | `false` | +| `ldap.tls.verify` | Verify connection. 
Valid values are 'verify_peer' or 'verify_none' | `verify_peer` | +| `ldap.tls.certificatesMountPath` | Where LDAP certifcates are mounted. | `/opt/bitnami/rabbitmq/ldap/certs` | +| `ldap.tls.certificatesSecret` | Secret with LDAP certificates. | `""` | +| `ldap.tls.CAFilename` | CA certificate filename. Should match with the CA entry key in the ldap.tls.certificatesSecret. | `""` | +| `ldap.tls.certFilename` | Client certificate filename to authenticate against the LDAP server. Should match with certificate the entry key in the ldap.tls.certificatesSecret. | `""` | +| `ldap.tls.certKeyFilename` | Client Key filename to authenticate against the LDAP server. Should match with certificate the entry key in the ldap.tls.certificatesSecret. | `""` | +| `extraVolumeMounts` | Optionally specify extra list of additional volumeMounts | `[]` | +| `extraVolumes` | Optionally specify extra list of additional volumes . | `[]` | +| `extraSecrets` | Optionally specify extra secrets to be created by the chart. | `{}` | +| `extraSecretsPrependReleaseName` | Set this flag to true if extraSecrets should be created with prepended. | `false` | + + +### Statefulset parameters + +| Name | Description | Value | +| --------------------------------------- | ------------------------------------------------------------------------------------------------------------------------ | --------------- | +| `replicaCount` | Number of RabbitMQ replicas to deploy | `1` | +| `schedulerName` | Use an alternate scheduler, e.g. "stork". | `""` | +| `podManagementPolicy` | Pod management policy | `OrderedReady` | +| `podLabels` | RabbitMQ Pod labels. Evaluated as a template | `{}` | +| `podAnnotations` | RabbitMQ Pod annotations. Evaluated as a template | `{}` | +| `updateStrategy.type` | Update strategy type for RabbitMQ statefulset | `RollingUpdate` | +| `statefulsetLabels` | RabbitMQ statefulset labels. Evaluated as a template | `{}` | +| `priorityClassName` | Name of the priority class to be used by RabbitMQ pods, priority class needs to be created beforehand | `""` | +| `podAffinityPreset` | Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `podAntiAffinityPreset` | Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `soft` | +| `nodeAffinityPreset.type` | Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `nodeAffinityPreset.key` | Node label key to match Ignored if `affinity` is set. | `""` | +| `nodeAffinityPreset.values` | Node label values to match. Ignored if `affinity` is set. | `[]` | +| `affinity` | Affinity for pod assignment. Evaluated as a template | `{}` | +| `nodeSelector` | Node labels for pod assignment. Evaluated as a template | `{}` | +| `tolerations` | Tolerations for pod assignment. Evaluated as a template | `[]` | +| `topologySpreadConstraints` | Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. 
Evaluated as a template | `[]` | +| `podSecurityContext.enabled` | Enable RabbitMQ pods' Security Context | `true` | +| `podSecurityContext.fsGroup` | Set RabbitMQ pod's Security Context fsGroup | `1001` | +| `containerSecurityContext.enabled` | Enabled RabbitMQ containers' Security Context | `true` | +| `containerSecurityContext.runAsUser` | Set RabbitMQ containers' Security Context runAsUser | `1001` | +| `containerSecurityContext.runAsNonRoot` | Set RabbitMQ container's Security Context runAsNonRoot | `true` | +| `resources.limits` | The resources limits for RabbitMQ containers | `{}` | +| `resources.requests` | The requested resources for RabbitMQ containers | `{}` | +| `livenessProbe.enabled` | Enable livenessProbe | `true` | +| `livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `120` | +| `livenessProbe.periodSeconds` | Period seconds for livenessProbe | `30` | +| `livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `20` | +| `livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `6` | +| `livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` | +| `readinessProbe.enabled` | Enable readinessProbe | `true` | +| `readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `10` | +| `readinessProbe.periodSeconds` | Period seconds for readinessProbe | `30` | +| `readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `20` | +| `readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `3` | +| `readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` | +| `startupProbe.enabled` | Enable startupProbe | `false` | +| `startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `10` | +| `startupProbe.periodSeconds` | Period seconds for startupProbe | `30` | +| `startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `20` | +| `startupProbe.failureThreshold` | Failure threshold for startupProbe | `3` | +| `startupProbe.successThreshold` | Success threshold for startupProbe | `1` | +| `customLivenessProbe` | Override default liveness probe | `{}` | +| `customReadinessProbe` | Override default readiness probe | `{}` | +| `customStartupProbe` | Define a custom startup probe | `{}` | +| `initContainers` | Add init containers to the RabbitMQ pod | `[]` | +| `sidecars` | Add sidecar containers to the RabbitMQ pod | `[]` | +| `pdb.create` | Enable/disable a Pod Disruption Budget creation | `false` | +| `pdb.minAvailable` | Minimum number/percentage of pods that should remain scheduled | `1` | +| `pdb.maxUnavailable` | Maximum number/percentage of pods that may be made unavailable | `""` | + + +### RBAC parameters + +| Name | Description | Value | +| --------------------------------------------- | ------------------------------------------------------------------------------------------ | ------ | +| `serviceAccount.create` | Enable creation of ServiceAccount for RabbitMQ pods | `true` | +| `serviceAccount.name` | Name of the created serviceAccount | `""` | +| `serviceAccount.automountServiceAccountToken` | Auto-mount the service account token in the pod | `true` | +| `serviceAccount.annotations` | Annotations for service account. Evaluated as a template. Only used if `create` is `true`. 
| `{}` | +| `rbac.create` | Whether RBAC rules should be created | `true` | + + +### Persistence parameters + +| Name | Description | Value | +| --------------------------- | ------------------------------------------------ | -------------------------- | +| `persistence.enabled` | Enable RabbitMQ data persistence using PVC | `true` | +| `persistence.storageClass` | PVC Storage Class for RabbitMQ data volume | `""` | +| `persistence.selector` | Selector to match an existing Persistent Volume | `{}` | +| `persistence.accessModes` | PVC Access Modes for RabbitMQ data volume | `["ReadWriteOnce"]` | +| `persistence.existingClaim` | Provide an existing PersistentVolumeClaims | `""` | +| `persistence.mountPath` | The path the volume will be mounted at | `/bitnami/rabbitmq/mnesia` | +| `persistence.subPath` | The subdirectory of the volume to mount to | `""` | +| `persistence.size` | PVC Storage Request for RabbitMQ data volume | `8Gi` | +| `persistence.annotations` | Persistence annotations. Evaluated as a template | `{}` | + + +### Exposure parameters + +| Name | Description | Value | +| ---------------------------------- | -------------------------------------------------------------------------------------------------------------------------------- | ------------------------ | +| `service.type` | Kubernetes Service type | `ClusterIP` | +| `service.portEnabled` | Amqp port. Cannot be disabled when `auth.tls.enabled` is `false`. Listener can be disabled with `listeners.tcp = none`. | `true` | +| `service.distPortEnabled` | Erlang distribution server port | `true` | +| `service.managerPortEnabled` | RabbitMQ Manager port | `true` | +| `service.epmdPortEnabled` | RabbitMQ EPMD Discovery service port | `true` | +| `service.ports.amqp` | Amqp service port | `5672` | +| `service.ports.amqpTls` | Amqp TLS service port | `5671` | +| `service.ports.dist` | Erlang distribution service port | `25672` | +| `service.ports.manager` | RabbitMQ Manager service port | `15672` | +| `service.ports.metrics` | RabbitMQ Prometheues metrics service port | `9419` | +| `service.ports.epmd` | EPMD Discovery service port | `4369` | +| `service.portNames.amqp` | Amqp service port name | `amqp` | +| `service.portNames.amqpTls` | Amqp TLS service port name | `amqp-ssl` | +| `service.portNames.dist` | Erlang distribution service port name | `dist` | +| `service.portNames.manager` | RabbitMQ Manager service port name | `http-stats` | +| `service.portNames.metrics` | RabbitMQ Prometheues metrics service port name | `metrics` | +| `service.portNames.epmd` | EPMD Discovery service port name | `epmd` | +| `service.nodePorts.amqp` | Node port for Ampq | `""` | +| `service.nodePorts.amqpTls` | Node port for Ampq TLS | `""` | +| `service.nodePorts.dist` | Node port for Erlang distribution | `""` | +| `service.nodePorts.manager` | Node port for RabbitMQ Manager | `""` | +| `service.nodePorts.epmd` | Node port for EPMD Discovery | `""` | +| `service.nodePorts.metrics` | Node port for RabbitMQ Prometheues metrics | `""` | +| `service.extraPorts` | Extra ports to expose in the service | `[]` | +| `service.loadBalancerSourceRanges` | Address(es) that are allowed when service is `LoadBalancer` | `[]` | +| `service.externalIPs` | Set the ExternalIPs | `[]` | +| `service.externalTrafficPolicy` | Enable client source IP preservation | `Cluster` | +| `service.loadBalancerIP` | Set the LoadBalancerIP | `""` | +| `service.clusterIP` | Kubernetes service Cluster IP | `""` | +| `service.labels` | Service labels. 
Evaluated as a template | `{}` | +| `service.annotations` | Service annotations. Evaluated as a template | `{}` | +| `service.annotationsHeadless` | Headless Service annotations. Evaluated as a template | `{}` | +| `service.sessionAffinity` | Session Affinity for Kubernetes service, can be "None" or "ClientIP" | `None` | +| `service.sessionAffinityConfig` | Additional settings for the sessionAffinity | `{}` | +| `ingress.enabled` | Enable ingress resource for Management console | `false` | +| `ingress.path` | Path for the default host. You may need to set this to '/*' in order to use this with ALB ingress controllers. | `/` | +| `ingress.pathType` | Ingress path type | `ImplementationSpecific` | +| `ingress.hostname` | Default host for the ingress resource | `rabbitmq.local` | +| `ingress.annotations` | Additional annotations for the Ingress resource. To enable certificate autogeneration, place here your cert-manager annotations. | `{}` | +| `ingress.tls` | Enable TLS configuration for the hostname defined at `ingress.hostname` parameter | `false` | +| `ingress.selfSigned` | Set this to true in order to create a TLS secret for this ingress record | `false` | +| `ingress.extraHosts` | The list of additional hostnames to be covered with this ingress record. | `[]` | +| `ingress.extraPaths` | An array with additional arbitrary paths that may need to be added to the ingress under the main host | `[]` | +| `ingress.extraRules` | The list of additional rules to be added to this ingress record. Evaluated as a template | `[]` | +| `ingress.extraTls` | The tls configuration for additional hostnames to be covered with this ingress record. | `[]` | +| `ingress.secrets` | Custom TLS certificates as secrets | `[]` | +| `ingress.ingressClassName` | IngressClass that will be be used to implement the Ingress (Kubernetes 1.18+) | `""` | +| `ingress.existingSecret` | It is you own the certificate as secret. | `""` | +| `networkPolicy.enabled` | Enable creation of NetworkPolicy resources | `false` | +| `networkPolicy.allowExternal` | Don't require client label for connections | `true` | +| `networkPolicy.additionalRules` | Additional NetworkPolicy Ingress "from" rules to set. Note that all rules are OR-ed. | `[]` | + + +### Metrics Parameters + +| Name | Description | Value | +| ------------------------------------------ | -------------------------------------------------------------------------------------- | --------------------- | +| `metrics.enabled` | Enable exposing RabbitMQ metrics to be gathered by Prometheus | `false` | +| `metrics.plugins` | Plugins to enable Prometheus metrics in RabbitMQ | `rabbitmq_prometheus` | +| `metrics.podAnnotations` | Annotations for enabling prometheus to access the metrics endpoint | `{}` | +| `metrics.serviceMonitor.enabled` | Create ServiceMonitor Resource for scraping metrics using PrometheusOperator | `false` | +| `metrics.serviceMonitor.namespace` | Specify the namespace in which the serviceMonitor resource will be created | `""` | +| `metrics.serviceMonitor.interval` | Specify the interval at which metrics should be scraped | `30s` | +| `metrics.serviceMonitor.scrapeTimeout` | Specify the timeout after which the scrape is ended | `""` | +| `metrics.serviceMonitor.jobLabel` | The name of the label on the target service to use as the job name in prometheus. | `""` | +| `metrics.serviceMonitor.relabelings` | RelabelConfigs to apply to samples before scraping. 
| `[]` | +| `metrics.serviceMonitor.metricRelabelings` | MetricsRelabelConfigs to apply to samples before ingestion. | `[]` | +| `metrics.serviceMonitor.honorLabels` | honorLabels chooses the metric's labels on collisions with target labels | `false` | +| `metrics.serviceMonitor.targetLabels` | Used to keep given service's labels in target | `{}` | +| `metrics.serviceMonitor.podTargetLabels` | Used to keep given pod's labels in target | `{}` | +| `metrics.serviceMonitor.path` | Define the path used by ServiceMonitor to scrap metrics | `""` | +| `metrics.serviceMonitor.selector` | ServiceMonitor selector labels | `{}` | +| `metrics.serviceMonitor.labels` | Extra labels for the ServiceMonitor | `{}` | +| `metrics.serviceMonitor.annotations` | Extra annotations for the ServiceMonitor | `{}` | +| `metrics.prometheusRule.enabled` | Set this to true to create prometheusRules for Prometheus operator | `false` | +| `metrics.prometheusRule.additionalLabels` | Additional labels that can be used so prometheusRules will be discovered by Prometheus | `{}` | +| `metrics.prometheusRule.namespace` | namespace where prometheusRules resource should be created | `""` | +| `metrics.prometheusRule.rules` | List of rules, used as template by Helm. | `[]` | + + +### Init Container Parameters + +| Name | Description | Value | +| ------------------------------------------------------ | --------------------------------------------------------------------------------------------------------------------------------- | ----------------------- | +| `volumePermissions.enabled` | Enable init container that changes the owner and group of the persistent volume(s) mountpoint to `runAsUser:fsGroup` | `false` | +| `volumePermissions.image.registry` | Init container volume-permissions image registry | `docker.io` | +| `volumePermissions.image.repository` | Init container volume-permissions image repository | `bitnami/bitnami-shell` | +| `volumePermissions.image.tag` | Init container volume-permissions image tag | `11-debian-11-r56` | +| `volumePermissions.image.digest` | Init container volume-permissions image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` | +| `volumePermissions.image.pullPolicy` | Init container volume-permissions image pull policy | `IfNotPresent` | +| `volumePermissions.image.pullSecrets` | Specify docker-registry secret names as an array | `[]` | +| `volumePermissions.resources.limits` | Init container volume-permissions resource limits | `{}` | +| `volumePermissions.resources.requests` | Init container volume-permissions resource requests | `{}` | +| `volumePermissions.containerSecurityContext.runAsUser` | User ID for the init container | `0` | + + +The above parameters map to the env variables defined in [bitnami/rabbitmq](https://github.com/bitnami/containers/tree/main/bitnami/rabbitmq). For more information please refer to the [bitnami/rabbitmq](https://github.com/bitnami/containers/tree/main/bitnami/rabbitmq) image documentation. + +Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example, + +```bash +$ helm install my-release \ + --set auth.username=admin,auth.password=secretpassword,auth.erlangCookie=secretcookie \ + my-repo/rabbitmq +``` + +The above command sets the RabbitMQ admin username and password to `admin` and `secretpassword` respectively. Additionally the secure erlang cookie is set to `secretcookie`. 
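+
+The same credentials can also be captured in a values file instead of `--set` flags. A minimal sketch (the keys mirror the `auth.*` parameters documented above; the values are placeholders to replace with your own):
+
+```yaml
+# values.yaml -- equivalent to the --set flags in the command above
+auth:
+  username: admin
+  password: secretpassword
+  erlangCookie: secretcookie
+```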
+ +> NOTE: Once this chart is deployed, it is not possible to change the application's access credentials, such as usernames or passwords, using Helm. To change these application credentials after deployment, delete any persistent volumes (PVs) used by the chart and re-deploy it, or use the application's built-in administrative tools if available. + +Alternatively, a YAML file that specifies the values for the parameters (such as the one sketched above) can be provided while installing the chart. For example, + +```bash +$ helm install my-release -f values.yaml my-repo/rabbitmq +``` + +> **Tip**: You can use the default [values.yaml](values.yaml) + +## Configuration and installation details + +### [Rolling vs Immutable tags](https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/) + +It is strongly recommended to use immutable tags in a production environment. This ensures your deployment does not change automatically if the same tag is updated with a different image. + +Bitnami will release a new chart updating its containers if a new version of the main container is available, or if significant changes or critical vulnerabilities exist. + +### Set pod affinity + +This chart allows you to set your custom affinity using the `affinity` parameter. Find more information about Pod affinity in the [kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity). + +As an alternative, you can use one of the preset configurations for pod affinity, pod anti-affinity, and node affinity available in the [bitnami/common](https://github.com/bitnami/charts/tree/main/bitnami/common#affinities) chart. To do so, set the `podAffinityPreset`, `podAntiAffinityPreset`, or `nodeAffinityPreset` parameters. + +### Scale horizontally + +To horizontally scale this chart once it has been deployed, two options are available: + +- Use the `kubectl scale` command. +- Upgrade the chart modifying the `replicaCount` parameter. + +> NOTE: When upgrading the chart, it is mandatory to specify the password and Erlang cookie that were set the first time the chart was installed. + +When scaling down the solution, unnecessary RabbitMQ nodes are automatically stopped, but they are not removed from the cluster. You need to manually remove them by running the `rabbitmqctl forget_cluster_node` command. + +Refer to the chart documentation for [more information on scaling the RabbitMQ cluster horizontally](https://docs.bitnami.com/kubernetes/infrastructure/rabbitmq/administration/scale-deployment/). + +### Enable TLS support + +To enable TLS support, first generate the certificates as described in the [RabbitMQ documentation for SSL certificate generation](https://www.rabbitmq.com/ssl.html#automated-certificate-generation). + +Once the certificates are generated, you have two alternatives: + +* Create a secret with the certificates and associate the secret when deploying the chart +* Include the certificates in the *values.yaml* file when deploying the chart + +Set the *auth.tls.failIfNoPeerCert* parameter to *false* to allow a TLS connection if the client fails to provide a certificate. + +Set the *auth.tls.sslOptionsVerify* parameter to *verify_peer* to force a node to perform peer verification. When set to *verify_none*, peer verification will be disabled and certificate exchange won't be performed. + +Refer to the chart documentation for [more information and examples of enabling TLS and using Let's Encrypt certificates](https://docs.bitnami.com/kubernetes/infrastructure/rabbitmq/administration/enable-tls-ingress/).
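+
+For instance, a minimal values sketch for the first alternative, assuming a pre-created secret (the name `rabbitmq-certificates` is a placeholder) that holds the CA, server certificate and key expected by `auth.tls.existingSecret`:
+
+```yaml
+auth:
+  tls:
+    enabled: true
+    # Placeholder: name of an existing secret with the certificate content
+    existingSecret: rabbitmq-certificates
+    failIfNoPeerCert: true
+    sslOptionsVerify: verify_peer
+```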
+
+### Load custom definitions
+
+It is possible to [load a RabbitMQ definitions file to configure RabbitMQ](https://www.rabbitmq.com/management.html#load-definitions).
+
+Because definitions may contain RabbitMQ credentials, [store the JSON as a Kubernetes secret](https://kubernetes.io/docs/concepts/configuration/secret/#using-secrets-as-files-from-a-pod). Within the secret's data, choose a key name that corresponds with the desired load definitions filename (e.g. `load_definition.json`) and use the JSON object as the value.
+
+Next, specify the `load_definitions` property as an `extraConfiguration` pointing to the load definition file path within the container (e.g. `/app/load_definition.json`) and set `loadDefinition.enable` to `true`. Any load definitions specified will be available within the container at `/app`.
+
+> NOTE: Loading a definition will take precedence over any configuration done through [Helm values](#parameters).
+
+If needed, you can use `extraSecrets` to let the chart create the secret for you. This way, you don't need to manually create it before deploying a release. These secrets can also be templated to use supplied chart values.
+
+Refer to the chart documentation for [more information and configuration examples of loading custom definitions](https://docs.bitnami.com/kubernetes/infrastructure/rabbitmq/configuration/load-files/).
+
+### Configure LDAP support
+
+LDAP support can be enabled in the chart by specifying the `ldap.*` parameters while creating a release. Refer to the chart documentation for [more information and a configuration example](https://docs.bitnami.com/kubernetes/infrastructure/rabbitmq/configuration/configure-ldap/).
+
+### Configure memory high watermark
+
+It is possible to configure a memory high watermark on RabbitMQ to define [memory thresholds](https://www.rabbitmq.com/memory.html#threshold) using the `memoryHighWatermark.*` parameters. To do so, you have two alternatives:
+
+* Set an absolute limit of RAM to be used on each RabbitMQ node, as shown in the configuration example below:
+
+```
+memoryHighWatermark.enabled="true"
+memoryHighWatermark.type="absolute"
+memoryHighWatermark.value="512MB"
+```
+
+* Set a relative limit of RAM to be used on each RabbitMQ node. To enable this feature, define the memory limits at pod level too. An example configuration is shown below:
+
+```
+memoryHighWatermark.enabled="true"
+memoryHighWatermark.type="relative"
+memoryHighWatermark.value="0.4"
+resources.limits.memory="2Gi"
+```
+
+### Add extra environment variables
+
+In case you want to add extra environment variables (useful for advanced operations like custom init scripts), you can use the `extraEnvVars` property.
+
+```yaml
+extraEnvVars:
+  - name: LOG_LEVEL
+    value: error
+```
+
+Alternatively, you can use a ConfigMap or a Secret with the environment variables. To do so, use the `extraEnvVarsCM` or the `extraEnvVarsSecret` properties.
+
+### Use plugins
+
+The Bitnami Docker RabbitMQ image ships with a set of plugins. By default, this chart enables `rabbitmq_management` and `rabbitmq_peer_discovery_k8s`, since they are required for RabbitMQ to work on K8s.
+
+To enable extra plugins, set the `extraPlugins` parameter with the list of plugins you want to enable. In addition to this, the `communityPlugins` parameter can be used to specify a list of URLs (separated by spaces) for custom plugins for RabbitMQ, as in the example below.
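+
+A minimal values sketch could look like the following; the plugin names and the plugin URL are placeholders used only to illustrate the space-separated format:
+
+```yaml
+# Space-separated list of additional plugins (already shipped with the image) to enable
+extraPlugins: "rabbitmq_auth_backend_ldap rabbitmq_shovel"
+# Space-separated list of URLs of custom community plugins to download and enable
+communityPlugins: "https://example.com/rabbitmq_custom_plugin-v3.11.ez"
+```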
+
+Refer to the chart documentation for [more information on using RabbitMQ plugins](https://docs.bitnami.com/kubernetes/infrastructure/rabbitmq/configuration/use-plugins/).
+
+### Advanced logging
+
+If you want to configure RabbitMQ logging, set the `logs` value to `false` and set the log configuration in `extraConfiguration`, following the [official documentation](https://www.rabbitmq.com/logging.html#log-file-location).
+
+An example:
+
+```yaml
+logs: false # custom logging
+extraConfiguration: |
+  log.default.level = warning
+  log.file = false
+  log.console = true
+  log.console.level = warning
+  log.console.formatter = json
+```
+
+### Recover the cluster from complete shutdown
+
+> IMPORTANT: Some of these procedures can lead to data loss. Always make a backup beforehand.
+
+The RabbitMQ cluster can tolerate multiple node failures but, if all the nodes are brought down at the same time, the cluster might not be able to self-recover.
+
+This happens if the pod management policy of the statefulset is not `Parallel` and the last pod to be running wasn't the first pod of the statefulset. If that happens, update the pod management policy to recover a healthy state:
+
+```console
+$ kubectl delete statefulset STATEFULSET_NAME --cascade=false
+$ helm upgrade RELEASE_NAME my-repo/rabbitmq \
+  --set podManagementPolicy=Parallel \
+  --set replicaCount=NUMBER_OF_REPLICAS \
+  --set auth.password=PASSWORD \
+  --set auth.erlangCookie=ERLANG_COOKIE
+```
+
+For a faster resynchronization of the nodes, you can temporarily disable the readiness probe by setting `readinessProbe.enabled=false`. Bear in mind that the pods will be exposed before they are actually ready to process requests.
+
+If the steps above don't bring the cluster to a healthy state, it is possible that none of the RabbitMQ nodes think they were the last node to be up during the shutdown. In those cases, you can force the boot of the nodes by specifying the `clustering.forceBoot=true` parameter (which will execute [`rabbitmqctl force_boot`](https://www.rabbitmq.com/rabbitmqctl.8.html#force_boot) in each pod):
+
+```console
+$ helm upgrade RELEASE_NAME my-repo/rabbitmq \
+  --set podManagementPolicy=Parallel \
+  --set clustering.forceBoot=true \
+  --set replicaCount=NUMBER_OF_REPLICAS \
+  --set auth.password=PASSWORD \
+  --set auth.erlangCookie=ERLANG_COOKIE
+```
+
+More information: [Clustering Guide: Restarting](https://www.rabbitmq.com/clustering.html#restarting).
+
+### Known issues
+
+- Changing the password through RabbitMQ's UI can make the pod fail due to the default liveness probes. If you do so, remember to make the chart aware of the new password. Updating the default secret with the password you set through RabbitMQ's UI will automatically recreate the pods. If you are using your own secret, you may have to manually recreate the pods.
+
+## Persistence
+
+The [Bitnami RabbitMQ](https://github.com/bitnami/containers/tree/main/bitnami/rabbitmq) image stores the RabbitMQ data and configurations at the `/opt/bitnami/rabbitmq/var/lib/rabbitmq/` path of the container.
+
+The chart mounts a [Persistent Volume](https://kubernetes.io/docs/concepts/storage/persistent-volumes/) at this location. By default, the volume is created using dynamic volume provisioning. An existing PersistentVolumeClaim can also be defined.
+
+### Use existing PersistentVolumeClaims
+
+1. Create the PersistentVolume
+1. Create the PersistentVolumeClaim
+1. Install the chart
+
+```bash
+$ helm install my-release --set persistence.existingClaim=PVC_NAME my-repo/rabbitmq
+```
+
+### Adjust permissions of the persistence volume mountpoint
+
+As the image runs as non-root by default, it is necessary to adjust the ownership of the persistent volume so that the container can write data into it.
+
+By default, the chart is configured to use Kubernetes Security Context to automatically change the ownership of the volume. However, this feature does not work in all Kubernetes distributions.
+As an alternative, this chart supports using an `initContainer` to change the ownership of the volume before mounting it in the final destination.
+
+You can enable this `initContainer` by setting `volumePermissions.enabled` to `true`.
+
+### Configure the default user/vhost
+
+If you want to create a default user/vhost and set the default permissions, you can use `extraConfiguration`:
+
+```yaml
+auth:
+  username: default-user
+extraConfiguration: |-
+  default_vhost = default-vhost
+  default_permissions.configure = .*
+  default_permissions.read = .*
+  default_permissions.write = .*
+```
+
+## Troubleshooting
+
+Find more information about how to deal with common errors related to Bitnami's Helm charts in [this troubleshooting guide](https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues).
+
+## Upgrading
+
+It's necessary to set the `auth.password` and `auth.erlangCookie` parameters when upgrading for the readiness/liveness probes to work properly. When you install this chart for the first time, some notes will be displayed providing the credentials you must use under the 'Credentials' section. Please note down the password and the cookie, and run the command below to upgrade your chart:
+
+```bash
+$ helm upgrade my-release my-repo/rabbitmq --set auth.password=[PASSWORD] --set auth.erlangCookie=[RABBITMQ_ERLANG_COOKIE]
+```
+
+> NOTE: you need to substitute the placeholders [PASSWORD] and [RABBITMQ_ERLANG_COOKIE] with the values obtained in the installation notes.
+
+### To 11.0.0
+
+This major version changes the default RabbitMQ image from 3.10.x to 3.11.x. Follow the [official instructions](https://www.rabbitmq.com/upgrade.html) to upgrade from 3.10 to 3.11.
+
+### To 10.0.0
+
+This major version changes the default RabbitMQ image from 3.9.x to 3.10.x. Follow the [official instructions](https://www.rabbitmq.com/upgrade.html) to upgrade from 3.9 to 3.10.
+
+### To 9.0.0
+
+This major release renames several values in this chart and adds missing features, in order to be aligned with the rest of the assets in the Bitnami charts repository.
+
+- `service.port` has been renamed as `service.ports.amqp`.
+- `service.portName` has been renamed as `service.portNames.amqp`.
+- `service.nodePort` has been renamed as `service.nodePorts.amqp`.
+- `service.tlsPort` has been renamed as `service.ports.amqpTls`.
+- `service.tlsPortName` has been renamed as `service.portNames.amqpTls`.
+- `service.tlsNodePort` has been renamed as `service.nodePorts.amqpTls`.
+- `service.epmdPortName` has been renamed as `service.portNames.epmd`.
+- `service.epmdNodePort` has been renamed as `service.nodePorts.epmd`.
+- `service.distPort` has been renamed as `service.ports.dist`.
+- `service.distPortName` has been renamed as `service.portNames.dist`.
+- `service.distNodePort` has been renamed as `service.nodePorts.dist`.
+- `service.managerPort` has been renamed as `service.ports.manager`.
+- `service.managerPortName` has been renamed as `service.portNames.manager`.
+- `service.managerNodePort` has been renamed as `service.nodePorts.manager`.
+- `service.metricsPort` has been renamed as `service.ports.metrics`.
+- `service.metricsPortName` has been renamed as `service.portNames.metrics`.
+- `service.metricsNodePort` has been renamed as `service.nodePorts.metrics`.
+- `persistence.volumes` has been removed, as it duplicates the parameter `extraVolumes`.
+- `ingress.certManager` has been removed.
+- `metrics.serviceMonitor.relabellings` has been replaced with `metrics.serviceMonitor.relabelings`, and it sets the field `relabelings` instead of `metricRelabelings`.
+- `metrics.serviceMonitor.additionalLabels` has been renamed as `metrics.serviceMonitor.labels`.
+- `updateStrategyType` has been removed; use the field `updateStrategy` instead, which is interpreted as a template.
+- The content of `podSecurityContext` and `containerSecurityContext` has been modified.
+- The behavior of VolumePermissions has been modified to not change ownership of '.snapshot' and 'lost+found'.
+- Introduced the values `ContainerPorts.*`, separating the service and container ports configuration.
+
+### To 8.21.0
+
+This new version of the chart bumps the RabbitMQ version to `3.9.1`. It is considered a minor release, and no breaking changes are expected. Additionally, RabbitMQ `3.9.X` nodes can run alongside `3.8.X` nodes.
+
+See the [Upgrading guide](https://www.rabbitmq.com/upgrade.html) and the [RabbitMQ change log](https://www.rabbitmq.com/changelog.html) for further documentation.
+
+### To 8.0.0
+
+[On November 13, 2020, Helm v2 support formally ended](https://github.com/helm/charts#status-of-the-project). This major version is the result of the required changes applied to the Helm Chart to be able to incorporate the different features added in Helm v3 and to be consistent with the Helm project itself regarding the Helm v2 EOL.
+
+[Learn more about this change and related upgrade considerations](https://docs.bitnami.com/kubernetes/infrastructure/rabbitmq/administration/upgrade-helm3/).
+
+### To 7.0.0
+
+- Several parameters were renamed or removed in favor of new ones in this major version:
+  - `replicas` is renamed to `replicaCount`.
+  - `securityContext.*` is deprecated in favor of `podSecurityContext` and `containerSecurityContext`.
+  - Authentication parameters were reorganized under the `auth.*` parameter:
+    - `rabbitmq.username`, `rabbitmq.password`, and `rabbitmq.erlangCookie` are now `auth.username`, `auth.password`, and `auth.erlangCookie` respectively.
+    - `rabbitmq.tls.*` parameters are now under `auth.tls.*`.
+  - Parameters prefixed with `rabbitmq.` were renamed removing the prefix, e.g. `rabbitmq.configuration` is renamed to `configuration`.
+  - `rabbitmq.rabbitmqClusterNodeName` is deprecated.
+  - `rabbitmq.setUlimitNofiles` is deprecated.
+  - `forceBoot.enabled` is renamed to `clustering.forceBoot`.
+  - `loadDefinition.secretName` is renamed to `loadDefinition.existingSecret`.
+  - `metrics.port` is renamed to `service.metricsPort`.
+  - `service.extraContainerPorts` is renamed to `extraContainerPorts`.
+  - `service.nodeTlsPort` is renamed to `service.tlsNodePort`.
+  - `podDisruptionBudget` is deprecated in favor of `pdb.create`, `pdb.minAvailable`, and `pdb.maxUnavailable`.
+  - `rbacEnabled` is deprecated in favor of `rbac.create`.
+  - New parameters: `serviceAccount.create`, and `serviceAccount.name`.
+  - New parameters: `memoryHighWatermark.enabled`, `memoryHighWatermark.type`, and `memoryHighWatermark.value`.
+- Chart labels and Ingress configuration were adapted to follow the Helm charts best practices.
+- Initialization logic now relies on the container.
+- This version introduces `bitnami/common`, a [library chart](https://helm.sh/docs/topics/library_charts/#helm), as a dependency. More documentation about this new utility can be found [here](https://github.com/bitnami/charts/tree/main/bitnami/common#bitnami-common-library-chart). Please make sure that you have updated the chart dependencies before executing any upgrade.
+- The layout of the persistent volumes has changed (if using persistence). Action is required if preserving data through the upgrade is desired:
+  - The data has moved from `mnesia/` within the persistent volume to the root of the persistent volume.
+  - The `config/` and `schema/` directories within the persistent volume are no longer used.
+  - An init container can be used to move and clean up the persistent volumes. An example can be found [here](https://github.com/bitnami/charts/issues/10913#issuecomment-1169619513).
+  - Alternatively, the value `persistence.subPath` can be overridden to be `mnesia` so that the directory layout is consistent with what it was previously.
+    - Note however that this will leave the unused `config/` and `schema/` directories within the persistent volume forever.
+
+Consequences:
+
+- Backwards compatibility is not guaranteed.
+- Compatibility with non-Bitnami images is not guaranteed anymore.
+
+### To 6.0.0
+
+This new version updates the RabbitMQ image to a [new version based on bash instead of node.js](https://github.com/bitnami/containers/tree/main/bitnami/rabbitmq#3715-r18-3715-ol-7-r19). However, since this Chart overwrites the container's command, the changes to the container shouldn't affect the Chart. To upgrade, it may be necessary to enable the `fastBoot` option, as is already the case when upgrading from 5.X to 5.Y.
+
+### To 5.0.0
+
+This major release changes the clustering method from `ip` to `hostname`.
+This change is needed to fix persistence. The data directory now depends on the hostname, which is stable, instead of the pod IP, which might change.
+
+> IMPORTANT: Note that if you upgrade from a previous version you will lose your data.
+
+### To 3.0.0
+
+Backwards compatibility is not guaranteed unless you modify the labels used on the chart's deployments.
+Use the workaround below to upgrade from versions previous to 3.0.0. The following example assumes that the release name is rabbitmq:
+
+```console
+$ kubectl delete statefulset rabbitmq --cascade=false
+```
+
+## Bitnami Kubernetes Documentation
+
+Bitnami Kubernetes documentation is available at [https://docs.bitnami.com/](https://docs.bitnami.com/). There you can find the following resources:
+
+- [Documentation for RabbitMQ Helm chart](https://docs.bitnami.com/kubernetes/infrastructure/rabbitmq/)
+- [Get Started with Kubernetes guides](https://docs.bitnami.com/kubernetes/)
+- [Bitnami Helm charts documentation](https://docs.bitnami.com/kubernetes/apps/)
+- [Kubernetes FAQs](https://docs.bitnami.com/kubernetes/faq/)
+- [Kubernetes Developer guides](https://docs.bitnami.com/tutorials/)
+
+## License
+
+Copyright © 2022 Bitnami
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. \ No newline at end of file diff --git a/charts/modules/rabbitmq/charts/common/.helmignore b/charts/modules/rabbitmq/charts/common/.helmignore new file mode 100644 index 0000000..50af031 --- /dev/null +++ b/charts/modules/rabbitmq/charts/common/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/charts/modules/rabbitmq/charts/common/Chart.yaml b/charts/modules/rabbitmq/charts/common/Chart.yaml new file mode 100644 index 0000000..653c063 --- /dev/null +++ b/charts/modules/rabbitmq/charts/common/Chart.yaml @@ -0,0 +1,23 @@ +annotations: + category: Infrastructure +apiVersion: v2 +appVersion: 2.2.1 +description: A Library Helm Chart for grouping common logic between bitnami charts. + This chart is not deployable by itself. +home: https://github.com/bitnami/charts/tree/main/bitnami/common +icon: https://bitnami.com/downloads/logos/bitnami-mark.png +keywords: +- common +- helper +- template +- function +- bitnami +maintainers: +- name: Bitnami + url: https://github.com/bitnami/charts +name: common +sources: +- https://github.com/bitnami/charts +- https://www.bitnami.com/ +type: library +version: 2.2.1 diff --git a/charts/modules/rabbitmq/charts/common/README.md b/charts/modules/rabbitmq/charts/common/README.md new file mode 100644 index 0000000..ec43a5f --- /dev/null +++ b/charts/modules/rabbitmq/charts/common/README.md @@ -0,0 +1,351 @@ +# Bitnami Common Library Chart + +A [Helm Library Chart](https://helm.sh/docs/topics/library_charts/#helm) for grouping common logic between bitnami charts. + +## TL;DR + +```yaml +dependencies: + - name: common + version: 1.x.x + repository: https://charts.bitnami.com/bitnami +``` + +```bash +$ helm dependency update +``` + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "common.names.fullname" . }} +data: + myvalue: "Hello World" +``` + +## Introduction + +This chart provides a common template helpers which can be used to develop new charts using [Helm](https://helm.sh) package manager. + +Bitnami charts can be used with [Kubeapps](https://kubeapps.dev/) for deployment and management of Helm Charts in clusters. + +## Prerequisites + +- Kubernetes 1.19+ +- Helm 3.2.0+ + +## Parameters + +The following table lists the helpers available in the library which are scoped in different sections. 
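+
+As a quick illustration of how a consuming chart's templates use these helpers, the snippet below renders a soft pod anti-affinity preset (a minimal sketch; the `component` value `my-app` and the indentation passed to `nindent` are just examples):
+
+```yaml
+affinity:
+  podAntiAffinity: {{- include "common.affinities.pods" (dict "type" "soft" "component" "my-app" "context" $) | nindent 4 }}
+```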
+ +### Affinities + +| Helper identifier | Description | Expected Input | +|-------------------------------|------------------------------------------------------|------------------------------------------------| +| `common.affinities.nodes.soft` | Return a soft nodeAffinity definition | `dict "key" "FOO" "values" (list "BAR" "BAZ")` | +| `common.affinities.nodes.hard` | Return a hard nodeAffinity definition | `dict "key" "FOO" "values" (list "BAR" "BAZ")` | +| `common.affinities.pods.soft` | Return a soft podAffinity/podAntiAffinity definition | `dict "component" "FOO" "context" $` | +| `common.affinities.pods.hard` | Return a hard podAffinity/podAntiAffinity definition | `dict "component" "FOO" "context" $` | +| `common.affinities.topologyKey` | Return a topologyKey definition | `dict "topologyKey" "FOO"` | + +### Capabilities + +| Helper identifier | Description | Expected Input | +|------------------------------------------------|------------------------------------------------------------------------------------------------|-------------------| +| `common.capabilities.kubeVersion` | Return the target Kubernetes version (using client default if .Values.kubeVersion is not set). | `.` Chart context | +| `common.capabilities.cronjob.apiVersion` | Return the appropriate apiVersion for cronjob. | `.` Chart context | +| `common.capabilities.deployment.apiVersion` | Return the appropriate apiVersion for deployment. | `.` Chart context | +| `common.capabilities.statefulset.apiVersion` | Return the appropriate apiVersion for statefulset. | `.` Chart context | +| `common.capabilities.ingress.apiVersion` | Return the appropriate apiVersion for ingress. | `.` Chart context | +| `common.capabilities.rbac.apiVersion` | Return the appropriate apiVersion for RBAC resources. | `.` Chart context | +| `common.capabilities.crd.apiVersion` | Return the appropriate apiVersion for CRDs. | `.` Chart context | +| `common.capabilities.policy.apiVersion` | Return the appropriate apiVersion for podsecuritypolicy. | `.` Chart context | +| `common.capabilities.networkPolicy.apiVersion` | Return the appropriate apiVersion for networkpolicy. | `.` Chart context | +| `common.capabilities.apiService.apiVersion` | Return the appropriate apiVersion for APIService. | `.` Chart context | +| `common.capabilities.hpa.apiVersion` | Return the appropriate apiVersion for Horizontal Pod Autoscaler | `.` Chart context | +| `common.capabilities.supportsHelmVersion` | Returns true if the used Helm version is 3.3+ | `.` Chart context | + +### Errors + +| Helper identifier | Description | Expected Input | +|-----------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------| +| `common.errors.upgrade.passwords.empty` | It will ensure required passwords are given when we are upgrading a chart. If `validationErrors` is not empty it will throw an error and will stop the upgrade action. 
| `dict "validationErrors" (list $validationError00 $validationError01) "context" $` | + +### Images + +| Helper identifier | Description | Expected Input | +|-----------------------------|------------------------------------------------------|---------------------------------------------------------------------------------------------------------| +| `common.images.image` | Return the proper and full image name | `dict "imageRoot" .Values.path.to.the.image "global" $`, see [ImageRoot](#imageroot) for the structure. | +| `common.images.pullSecrets` | Return the proper Docker Image Registry Secret Names (deprecated: use common.images.renderPullSecrets instead) | `dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "global" .Values.global` | +| `common.images.renderPullSecrets` | Return the proper Docker Image Registry Secret Names (evaluates values as templates) | `dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "context" $` | + +### Ingress + +| Helper identifier | Description | Expected Input | +|-------------------------------------------|-------------------------------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.ingress.backend` | Generate a proper Ingress backend entry depending on the API version | `dict "serviceName" "foo" "servicePort" "bar"`, see the [Ingress deprecation notice](https://kubernetes.io/blog/2019/07/18/api-deprecations-in-1-16/) for the syntax differences | +| `common.ingress.supportsPathType` | Prints "true" if the pathType field is supported | `.` Chart context | +| `common.ingress.supportsIngressClassname` | Prints "true" if the ingressClassname field is supported | `.` Chart context | +| `common.ingress.certManagerRequest` | Prints "true" if required cert-manager annotations for TLS signed certificates are set in the Ingress annotations | `dict "annotations" .Values.path.to.the.ingress.annotations` | + +### Labels + +| Helper identifier | Description | Expected Input | +|-----------------------------|-----------------------------------------------------------------------------|-------------------| +| `common.labels.standard` | Return Kubernetes standard labels | `.` Chart context | +| `common.labels.matchLabels` | Labels to use on `deploy.spec.selector.matchLabels` and `svc.spec.selector` | `.` Chart context | + +### Names + +| Helper identifier | Description | Expected Input | +|-----------------------------------|-----------------------------------------------------------------------|-------------------| +| `common.names.name` | Expand the name of the chart or use `.Values.nameOverride` | `.` Chart context | +| `common.names.fullname` | Create a default fully qualified app name. 
| `.` Chart context | +| `common.names.namespace` | Allow the release namespace to be overridden | `.` Chart context | +| `common.names.fullname.namespace` | Create a fully qualified app name adding the installation's namespace | `.` Chart context | +| `common.names.chart` | Chart name plus version | `.` Chart context | + +### Secrets + +| Helper identifier | Description | Expected Input | +|-----------------------------------|--------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.secrets.name` | Generate the name of the secret. | `dict "existingSecret" .Values.path.to.the.existingSecret "defaultNameSuffix" "mySuffix" "context" $` see [ExistingSecret](#existingsecret) for the structure. | +| `common.secrets.key` | Generate secret key. | `dict "existingSecret" .Values.path.to.the.existingSecret "key" "keyName"` see [ExistingSecret](#existingsecret) for the structure. | +| `common.secrets.passwords.manage` | Generate secret password or retrieve one if already created. | `dict "secret" "secret-name" "key" "keyName" "providedValues" (list "path.to.password1" "path.to.password2") "length" 10 "strong" false "chartName" "chartName" "context" $`, length, strong and chartNAme fields are optional. | +| `common.secrets.exists` | Returns whether a previous generated secret already exists. | `dict "secret" "secret-name" "context" $` | + +### Storage + +| Helper identifier | Description | Expected Input | +|-------------------------------|---------------------------------------|---------------------------------------------------------------------------------------------------------------------| +| `common.storage.class` | Return the proper Storage Class | `dict "persistence" .Values.path.to.the.persistence "global" $`, see [Persistence](#persistence) for the structure. | + +### TplValues + +| Helper identifier | Description | Expected Input | +|---------------------------|----------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.tplvalues.render` | Renders a value that contains template | `dict "value" .Values.path.to.the.Value "context" $`, value is the value should rendered as template, context frequently is the chart context `$` or `.` | + +### Utils + +| Helper identifier | Description | Expected Input | +|--------------------------------|------------------------------------------------------------------------------------------|------------------------------------------------------------------------| +| `common.utils.fieldToEnvVar` | Build environment variable name given a field. | `dict "field" "my-password"` | +| `common.utils.secret.getvalue` | Print instructions to get a secret value. 
| `dict "secret" "secret-name" "field" "secret-value-field" "context" $` | +| `common.utils.getValueFromKey` | Gets a value from `.Values` object given its key path | `dict "key" "path.to.key" "context" $` | +| `common.utils.getKeyFromList` | Returns first `.Values` key with a defined value or first of the list if all non-defined | `dict "keys" (list "path.to.key1" "path.to.key2") "context" $` | + +### Validations + +| Helper identifier | Description | Expected Input | +|--------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.validations.values.single.empty` | Validate a value must not be empty. | `dict "valueKey" "path.to.value" "secret" "secret.name" "field" "my-password" "subchart" "subchart" "context" $` secret, field and subchart are optional. In case they are given, the helper will generate a how to get instruction. See [ValidateValue](#validatevalue) | +| `common.validations.values.multiple.empty` | Validate a multiple values must not be empty. It returns a shared error for all the values. | `dict "required" (list $validateValueConf00 $validateValueConf01) "context" $`. See [ValidateValue](#validatevalue) | +| `common.validations.values.mariadb.passwords` | This helper will ensure required password for MariaDB are not empty. It returns a shared error for all the values. | `dict "secret" "mariadb-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use mariadb chart and the helper. | +| `common.validations.values.mysql.passwords` | This helper will ensure required password for MySQL are not empty. It returns a shared error for all the values. | `dict "secret" "mysql-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use mysql chart and the helper. | +| `common.validations.values.postgresql.passwords` | This helper will ensure required password for PostgreSQL are not empty. It returns a shared error for all the values. | `dict "secret" "postgresql-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use postgresql chart and the helper. | +| `common.validations.values.redis.passwords` | This helper will ensure required password for Redis® are not empty. It returns a shared error for all the values. | `dict "secret" "redis-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use redis chart and the helper. | +| `common.validations.values.cassandra.passwords` | This helper will ensure required password for Cassandra are not empty. It returns a shared error for all the values. | `dict "secret" "cassandra-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use cassandra chart and the helper. | +| `common.validations.values.mongodb.passwords` | This helper will ensure required password for MongoDB® are not empty. It returns a shared error for all the values. 
| `dict "secret" "mongodb-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use mongodb chart and the helper. | + +### Warnings + +| Helper identifier | Description | Expected Input | +|------------------------------|----------------------------------|------------------------------------------------------------| +| `common.warnings.rollingTag` | Warning about using rolling tag. | `ImageRoot` see [ImageRoot](#imageroot) for the structure. | + +## Special input schemas + +### ImageRoot + +```yaml +registry: + type: string + description: Docker registry where the image is located + example: docker.io + +repository: + type: string + description: Repository and image name + example: bitnami/nginx + +tag: + type: string + description: image tag + example: 1.16.1-debian-10-r63 + +pullPolicy: + type: string + description: Specify a imagePullPolicy. Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + +pullSecrets: + type: array + items: + type: string + description: Optionally specify an array of imagePullSecrets (evaluated as templates). + +debug: + type: boolean + description: Set to true if you would like to see extra information on logs + example: false + +## An instance would be: +# registry: docker.io +# repository: bitnami/nginx +# tag: 1.16.1-debian-10-r63 +# pullPolicy: IfNotPresent +# debug: false +``` + +### Persistence + +```yaml +enabled: + type: boolean + description: Whether enable persistence. + example: true + +storageClass: + type: string + description: Ghost data Persistent Volume Storage Class, If set to "-", storageClassName: "" which disables dynamic provisioning. + example: "-" + +accessMode: + type: string + description: Access mode for the Persistent Volume Storage. + example: ReadWriteOnce + +size: + type: string + description: Size the Persistent Volume Storage. + example: 8Gi + +path: + type: string + description: Path to be persisted. + example: /bitnami + +## An instance would be: +# enabled: true +# storageClass: "-" +# accessMode: ReadWriteOnce +# size: 8Gi +# path: /bitnami +``` + +### ExistingSecret + +```yaml +name: + type: string + description: Name of the existing secret. + example: mySecret +keyMapping: + description: Mapping between the expected key name and the name of the key in the existing secret. + type: object + +## An instance would be: +# name: mySecret +# keyMapping: +# password: myPasswordKey +``` + +#### Example of use + +When we store sensitive data for a deployment in a secret, some times we want to give to users the possibility of using theirs existing secrets. + +```yaml +# templates/secret.yaml +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "common.names.fullname" . }} + labels: + app: {{ include "common.names.fullname" . }} +type: Opaque +data: + password: {{ .Values.password | b64enc | quote }} + +# templates/dpl.yaml +--- +... + env: + - name: PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "common.secrets.name" (dict "existingSecret" .Values.existingSecret "context" $) }} + key: {{ include "common.secrets.key" (dict "existingSecret" .Values.existingSecret "key" "password") }} +... 
+ +# values.yaml +--- +name: mySecret +keyMapping: + password: myPasswordKey +``` + +### ValidateValue + +#### NOTES.txt + +```console +{{- $validateValueConf00 := (dict "valueKey" "path.to.value00" "secret" "secretName" "field" "password-00") -}} +{{- $validateValueConf01 := (dict "valueKey" "path.to.value01" "secret" "secretName" "field" "password-01") -}} + +{{ include "common.validations.values.multiple.empty" (dict "required" (list $validateValueConf00 $validateValueConf01) "context" $) }} +``` + +If we force those values to be empty we will see some alerts + +```console +$ helm install test mychart --set path.to.value00="",path.to.value01="" + 'path.to.value00' must not be empty, please add '--set path.to.value00=$PASSWORD_00' to the command. To get the current value: + + export PASSWORD_00=$(kubectl get secret --namespace default secretName -o jsonpath="{.data.password-00}" | base64 -d) + + 'path.to.value01' must not be empty, please add '--set path.to.value01=$PASSWORD_01' to the command. To get the current value: + + export PASSWORD_01=$(kubectl get secret --namespace default secretName -o jsonpath="{.data.password-01}" | base64 -d) +``` + +## Upgrading + +### To 1.0.0 + +[On November 13, 2020, Helm v2 support was formally finished](https://github.com/helm/charts#status-of-the-project), this major version is the result of the required changes applied to the Helm Chart to be able to incorporate the different features added in Helm v3 and to be consistent with the Helm project itself regarding the Helm v2 EOL. + +**What changes were introduced in this major version?** + +- Previous versions of this Helm Chart use `apiVersion: v1` (installable by both Helm 2 and 3), this Helm Chart was updated to `apiVersion: v2` (installable by Helm 3 only). [Here](https://helm.sh/docs/topics/charts/#the-apiversion-field) you can find more information about the `apiVersion` field. +- Use `type: library`. [Here](https://v3.helm.sh/docs/faq/#library-chart-support) you can find more information. +- The different fields present in the *Chart.yaml* file has been ordered alphabetically in a homogeneous way for all the Bitnami Helm Charts + +**Considerations when upgrading to this version** + +- If you want to upgrade to this version from a previous one installed with Helm v3, you shouldn't face any issues +- If you want to upgrade to this version using Helm v2, this scenario is not supported as this version doesn't support Helm v2 anymore +- If you installed the previous version with Helm v2 and wants to upgrade to this version with Helm v3, please refer to the [official Helm documentation](https://helm.sh/docs/topics/v2_v3_migration/#migration-use-cases) about migrating from Helm v2 to v3 + +**Useful links** + +- https://docs.bitnami.com/tutorials/resolve-helm2-helm3-post-migration-issues/ +- https://helm.sh/docs/topics/v2_v3_migration/ +- https://helm.sh/blog/migrate-from-helm-v2-to-helm-v3/ + +## License + +Copyright © 2022 Bitnami + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
diff --git a/charts/modules/rabbitmq/charts/common/templates/_affinities.tpl b/charts/modules/rabbitmq/charts/common/templates/_affinities.tpl new file mode 100644 index 0000000..81902a6 --- /dev/null +++ b/charts/modules/rabbitmq/charts/common/templates/_affinities.tpl @@ -0,0 +1,106 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Return a soft nodeAffinity definition +{{ include "common.affinities.nodes.soft" (dict "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.nodes.soft" -}} +preferredDuringSchedulingIgnoredDuringExecution: + - preference: + matchExpressions: + - key: {{ .key }} + operator: In + values: + {{- range .values }} + - {{ . | quote }} + {{- end }} + weight: 1 +{{- end -}} + +{{/* +Return a hard nodeAffinity definition +{{ include "common.affinities.nodes.hard" (dict "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.nodes.hard" -}} +requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: {{ .key }} + operator: In + values: + {{- range .values }} + - {{ . | quote }} + {{- end }} +{{- end -}} + +{{/* +Return a nodeAffinity definition +{{ include "common.affinities.nodes" (dict "type" "soft" "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.nodes" -}} + {{- if eq .type "soft" }} + {{- include "common.affinities.nodes.soft" . -}} + {{- else if eq .type "hard" }} + {{- include "common.affinities.nodes.hard" . -}} + {{- end -}} +{{- end -}} + +{{/* +Return a topologyKey definition +{{ include "common.affinities.topologyKey" (dict "topologyKey" "BAR") -}} +*/}} +{{- define "common.affinities.topologyKey" -}} +{{ .topologyKey | default "kubernetes.io/hostname" -}} +{{- end -}} + +{{/* +Return a soft podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods.soft" (dict "component" "FOO" "extraMatchLabels" .Values.extraMatchLabels "topologyKey" "BAR" "context" $) -}} +*/}} +{{- define "common.affinities.pods.soft" -}} +{{- $component := default "" .component -}} +{{- $extraMatchLabels := default (dict) .extraMatchLabels -}} +preferredDuringSchedulingIgnoredDuringExecution: + - podAffinityTerm: + labelSelector: + matchLabels: {{- (include "common.labels.matchLabels" .context) | nindent 10 }} + {{- if not (empty $component) }} + {{ printf "app.kubernetes.io/component: %s" $component }} + {{- end }} + {{- range $key, $value := $extraMatchLabels }} + {{ $key }}: {{ $value | quote }} + {{- end }} + topologyKey: {{ include "common.affinities.topologyKey" (dict "topologyKey" .topologyKey) }} + weight: 1 +{{- end -}} + +{{/* +Return a hard podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods.hard" (dict "component" "FOO" "extraMatchLabels" .Values.extraMatchLabels "topologyKey" "BAR" "context" $) -}} +*/}} +{{- define "common.affinities.pods.hard" -}} +{{- $component := default "" .component -}} +{{- $extraMatchLabels := default (dict) .extraMatchLabels -}} +requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: {{- (include "common.labels.matchLabels" .context) | nindent 8 }} + {{- if not (empty $component) }} + {{ printf "app.kubernetes.io/component: %s" $component }} + {{- end }} + {{- range $key, $value := $extraMatchLabels }} + {{ $key }}: {{ $value | quote }} + {{- end }} + topologyKey: {{ include "common.affinities.topologyKey" (dict "topologyKey" .topologyKey) }} +{{- end -}} + +{{/* +Return a podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods" (dict 
"type" "soft" "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.pods" -}} + {{- if eq .type "soft" }} + {{- include "common.affinities.pods.soft" . -}} + {{- else if eq .type "hard" }} + {{- include "common.affinities.pods.hard" . -}} + {{- end -}} +{{- end -}} diff --git a/charts/modules/rabbitmq/charts/common/templates/_capabilities.tpl b/charts/modules/rabbitmq/charts/common/templates/_capabilities.tpl new file mode 100644 index 0000000..9d9b760 --- /dev/null +++ b/charts/modules/rabbitmq/charts/common/templates/_capabilities.tpl @@ -0,0 +1,154 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Return the target Kubernetes version +*/}} +{{- define "common.capabilities.kubeVersion" -}} +{{- if .Values.global }} + {{- if .Values.global.kubeVersion }} + {{- .Values.global.kubeVersion -}} + {{- else }} + {{- default .Capabilities.KubeVersion.Version .Values.kubeVersion -}} + {{- end -}} +{{- else }} +{{- default .Capabilities.KubeVersion.Version .Values.kubeVersion -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for poddisruptionbudget. +*/}} +{{- define "common.capabilities.policy.apiVersion" -}} +{{- if semverCompare "<1.21-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "policy/v1beta1" -}} +{{- else -}} +{{- print "policy/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for networkpolicy. +*/}} +{{- define "common.capabilities.networkPolicy.apiVersion" -}} +{{- if semverCompare "<1.7-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for cronjob. +*/}} +{{- define "common.capabilities.cronjob.apiVersion" -}} +{{- if semverCompare "<1.21-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "batch/v1beta1" -}} +{{- else -}} +{{- print "batch/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for deployment. +*/}} +{{- define "common.capabilities.deployment.apiVersion" -}} +{{- if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for statefulset. +*/}} +{{- define "common.capabilities.statefulset.apiVersion" -}} +{{- if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "apps/v1beta1" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for ingress. +*/}} +{{- define "common.capabilities.ingress.apiVersion" -}} +{{- if .Values.ingress -}} +{{- if .Values.ingress.apiVersion -}} +{{- .Values.ingress.apiVersion -}} +{{- else if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "networking.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end }} +{{- else if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "networking.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for RBAC resources. 
+*/}} +{{- define "common.capabilities.rbac.apiVersion" -}} +{{- if semverCompare "<1.17-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "rbac.authorization.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "rbac.authorization.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for CRDs. +*/}} +{{- define "common.capabilities.crd.apiVersion" -}} +{{- if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "apiextensions.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "apiextensions.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for APIService. +*/}} +{{- define "common.capabilities.apiService.apiVersion" -}} +{{- if semverCompare "<1.10-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "apiregistration.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "apiregistration.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for Horizontal Pod Autoscaler. +*/}} +{{- define "common.capabilities.hpa.apiVersion" -}} +{{- if semverCompare "<1.23-0" (include "common.capabilities.kubeVersion" .context) -}} +{{- if .beta2 -}} +{{- print "autoscaling/v2beta2" -}} +{{- else -}} +{{- print "autoscaling/v2beta1" -}} +{{- end -}} +{{- else -}} +{{- print "autoscaling/v2" -}} +{{- end -}} +{{- end -}} + +{{/* +Returns true if the used Helm version is 3.3+. +A way to check the used Helm version was not introduced until version 3.3.0 with .Capabilities.HelmVersion, which contains an additional "{}}" structure. +This check is introduced as a regexMatch instead of {{ if .Capabilities.HelmVersion }} because checking for the key HelmVersion in <3.3 results in a "interface not found" error. +**To be removed when the catalog's minimun Helm version is 3.3** +*/}} +{{- define "common.capabilities.supportsHelmVersion" -}} +{{- if regexMatch "{(v[0-9])*[^}]*}}$" (.Capabilities | toString ) }} + {{- true -}} +{{- end -}} +{{- end -}} diff --git a/charts/modules/rabbitmq/charts/common/templates/_errors.tpl b/charts/modules/rabbitmq/charts/common/templates/_errors.tpl new file mode 100644 index 0000000..a79cc2e --- /dev/null +++ b/charts/modules/rabbitmq/charts/common/templates/_errors.tpl @@ -0,0 +1,23 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Through error when upgrading using empty passwords values that must not be empty. + +Usage: +{{- $validationError00 := include "common.validations.values.single.empty" (dict "valueKey" "path.to.password00" "secret" "secretName" "field" "password-00") -}} +{{- $validationError01 := include "common.validations.values.single.empty" (dict "valueKey" "path.to.password01" "secret" "secretName" "field" "password-01") -}} +{{ include "common.errors.upgrade.passwords.empty" (dict "validationErrors" (list $validationError00 $validationError01) "context" $) }} + +Required password params: + - validationErrors - String - Required. List of validation strings to be return, if it is empty it won't throw error. + - context - Context - Required. Parent context. +*/}} +{{- define "common.errors.upgrade.passwords.empty" -}} + {{- $validationErrors := join "" .validationErrors -}} + {{- if and $validationErrors .context.Release.IsUpgrade -}} + {{- $errorString := "\nPASSWORDS ERROR: You must provide your current passwords when upgrading the release." -}} + {{- $errorString = print $errorString "\n Note that even after reinstallation, old credentials may be needed as they may be kept in persistent volume claims." 
-}} + {{- $errorString = print $errorString "\n Further information can be obtained at https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues/#credential-errors-while-upgrading-chart-releases" -}} + {{- $errorString = print $errorString "\n%s" -}} + {{- printf $errorString $validationErrors | fail -}} + {{- end -}} +{{- end -}} diff --git a/charts/modules/rabbitmq/charts/common/templates/_images.tpl b/charts/modules/rabbitmq/charts/common/templates/_images.tpl new file mode 100644 index 0000000..46c659e --- /dev/null +++ b/charts/modules/rabbitmq/charts/common/templates/_images.tpl @@ -0,0 +1,76 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Return the proper image name +{{ include "common.images.image" ( dict "imageRoot" .Values.path.to.the.image "global" $) }} +*/}} +{{- define "common.images.image" -}} +{{- $registryName := .imageRoot.registry -}} +{{- $repositoryName := .imageRoot.repository -}} +{{- $separator := ":" -}} +{{- $termination := .imageRoot.tag | toString -}} +{{- if .global }} + {{- if .global.imageRegistry }} + {{- $registryName = .global.imageRegistry -}} + {{- end -}} +{{- end -}} +{{- if .imageRoot.digest }} + {{- $separator = "@" -}} + {{- $termination = .imageRoot.digest | toString -}} +{{- end -}} +{{- printf "%s/%s%s%s" $registryName $repositoryName $separator $termination -}} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names (deprecated: use common.images.renderPullSecrets instead) +{{ include "common.images.pullSecrets" ( dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "global" .Values.global) }} +*/}} +{{- define "common.images.pullSecrets" -}} + {{- $pullSecrets := list }} + + {{- if .global }} + {{- range .global.imagePullSecrets -}} + {{- $pullSecrets = append $pullSecrets . -}} + {{- end -}} + {{- end -}} + + {{- range .images -}} + {{- range .pullSecrets -}} + {{- $pullSecrets = append $pullSecrets . -}} + {{- end -}} + {{- end -}} + + {{- if (not (empty $pullSecrets)) }} +imagePullSecrets: + {{- range $pullSecrets }} + - name: {{ . }} + {{- end }} + {{- end }} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names evaluating values as templates +{{ include "common.images.renderPullSecrets" ( dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "context" $) }} +*/}} +{{- define "common.images.renderPullSecrets" -}} + {{- $pullSecrets := list }} + {{- $context := .context }} + + {{- if $context.Values.global }} + {{- range $context.Values.global.imagePullSecrets -}} + {{- $pullSecrets = append $pullSecrets (include "common.tplvalues.render" (dict "value" . "context" $context)) -}} + {{- end -}} + {{- end -}} + + {{- range .images -}} + {{- range .pullSecrets -}} + {{- $pullSecrets = append $pullSecrets (include "common.tplvalues.render" (dict "value" . "context" $context)) -}} + {{- end -}} + {{- end -}} + + {{- if (not (empty $pullSecrets)) }} +imagePullSecrets: + {{- range $pullSecrets }} + - name: {{ . }} + {{- end }} + {{- end }} +{{- end -}} diff --git a/charts/modules/rabbitmq/charts/common/templates/_ingress.tpl b/charts/modules/rabbitmq/charts/common/templates/_ingress.tpl new file mode 100644 index 0000000..831da9c --- /dev/null +++ b/charts/modules/rabbitmq/charts/common/templates/_ingress.tpl @@ -0,0 +1,68 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Generate backend entry that is compatible with all Kubernetes API versions. 
+ +Usage: +{{ include "common.ingress.backend" (dict "serviceName" "backendName" "servicePort" "backendPort" "context" $) }} + +Params: + - serviceName - String. Name of an existing service backend + - servicePort - String/Int. Port name (or number) of the service. It will be translated to different yaml depending if it is a string or an integer. + - context - Dict - Required. The context for the template evaluation. +*/}} +{{- define "common.ingress.backend" -}} +{{- $apiVersion := (include "common.capabilities.ingress.apiVersion" .context) -}} +{{- if or (eq $apiVersion "extensions/v1beta1") (eq $apiVersion "networking.k8s.io/v1beta1") -}} +serviceName: {{ .serviceName }} +servicePort: {{ .servicePort }} +{{- else -}} +service: + name: {{ .serviceName }} + port: + {{- if typeIs "string" .servicePort }} + name: {{ .servicePort }} + {{- else if or (typeIs "int" .servicePort) (typeIs "float64" .servicePort) }} + number: {{ .servicePort | int }} + {{- end }} +{{- end -}} +{{- end -}} + +{{/* +Print "true" if the API pathType field is supported +Usage: +{{ include "common.ingress.supportsPathType" . }} +*/}} +{{- define "common.ingress.supportsPathType" -}} +{{- if (semverCompare "<1.18-0" (include "common.capabilities.kubeVersion" .)) -}} +{{- print "false" -}} +{{- else -}} +{{- print "true" -}} +{{- end -}} +{{- end -}} + +{{/* +Returns true if the ingressClassname field is supported +Usage: +{{ include "common.ingress.supportsIngressClassname" . }} +*/}} +{{- define "common.ingress.supportsIngressClassname" -}} +{{- if semverCompare "<1.18-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "false" -}} +{{- else -}} +{{- print "true" -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if cert-manager required annotations for TLS signed +certificates are set in the Ingress annotations +Ref: https://cert-manager.io/docs/usage/ingress/#supported-annotations +Usage: +{{ include "common.ingress.certManagerRequest" ( dict "annotations" .Values.path.to.the.ingress.annotations ) }} +*/}} +{{- define "common.ingress.certManagerRequest" -}} +{{ if or (hasKey .annotations "cert-manager.io/cluster-issuer") (hasKey .annotations "cert-manager.io/issuer") (hasKey .annotations "kubernetes.io/tls-acme") }} + {{- true -}} +{{- end -}} +{{- end -}} diff --git a/charts/modules/rabbitmq/charts/common/templates/_labels.tpl b/charts/modules/rabbitmq/charts/common/templates/_labels.tpl new file mode 100644 index 0000000..252066c --- /dev/null +++ b/charts/modules/rabbitmq/charts/common/templates/_labels.tpl @@ -0,0 +1,18 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Kubernetes standard labels +*/}} +{{- define "common.labels.standard" -}} +app.kubernetes.io/name: {{ include "common.names.name" . }} +helm.sh/chart: {{ include "common.names.chart" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end -}} + +{{/* +Labels to use on deploy.spec.selector.matchLabels and svc.spec.selector +*/}} +{{- define "common.labels.matchLabels" -}} +app.kubernetes.io/name: {{ include "common.names.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end -}} diff --git a/charts/modules/rabbitmq/charts/common/templates/_names.tpl b/charts/modules/rabbitmq/charts/common/templates/_names.tpl new file mode 100644 index 0000000..1bdac8b --- /dev/null +++ b/charts/modules/rabbitmq/charts/common/templates/_names.tpl @@ -0,0 +1,70 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. 
+*/}} +{{- define "common.names.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "common.names.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "common.names.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create a default fully qualified dependency name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +Usage: +{{ include "common.names.dependency.fullname" (dict "chartName" "dependency-chart-name" "chartValues" .Values.dependency-chart "context" $) }} +*/}} +{{- define "common.names.dependency.fullname" -}} +{{- if .chartValues.fullnameOverride -}} +{{- .chartValues.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .chartName .chartValues.nameOverride -}} +{{- if contains $name .context.Release.Name -}} +{{- .context.Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .context.Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Allow the release namespace to be overridden for multi-namespace deployments in combined charts. +*/}} +{{- define "common.names.namespace" -}} +{{- if .Values.namespaceOverride -}} +{{- .Values.namespaceOverride -}} +{{- else -}} +{{- .Release.Namespace -}} +{{- end -}} +{{- end -}} + +{{/* +Create a fully qualified app name adding the installation's namespace. +*/}} +{{- define "common.names.fullname.namespace" -}} +{{- printf "%s-%s" (include "common.names.fullname" .) (include "common.names.namespace" .) | trunc 63 | trimSuffix "-" -}} +{{- end -}} diff --git a/charts/modules/rabbitmq/charts/common/templates/_secrets.tpl b/charts/modules/rabbitmq/charts/common/templates/_secrets.tpl new file mode 100644 index 0000000..4267d42 --- /dev/null +++ b/charts/modules/rabbitmq/charts/common/templates/_secrets.tpl @@ -0,0 +1,165 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Generate secret name. + +Usage: +{{ include "common.secrets.name" (dict "existingSecret" .Values.path.to.the.existingSecret "defaultNameSuffix" "mySuffix" "context" $) }} + +Params: + - existingSecret - ExistingSecret/String - Optional. The path to the existing secrets in the values.yaml given by the user + to be used instead of the default one. Allows for it to be of type String (just the secret name) for backwards compatibility. + +info: https://github.com/bitnami/charts/tree/main/bitnami/common#existingsecret + - defaultNameSuffix - String - Optional. It is used only if we have several secrets in the same deployment. + - context - Dict - Required. The context for the template evaluation. 
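+
+Illustrative resolution (secret names assumed): if existingSecret is the plain string
+"my-creds", the helper returns "my-creds"; if it is a map with name set, that name is used;
+otherwise it falls back to the chart fullname, suffixed with "-<defaultNameSuffix>" when a
+suffix is supplied.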
+*/}} +{{- define "common.secrets.name" -}} +{{- $name := (include "common.names.fullname" .context) -}} + +{{- if .defaultNameSuffix -}} +{{- $name = printf "%s-%s" $name .defaultNameSuffix | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- with .existingSecret -}} +{{- if not (typeIs "string" .) -}} +{{- with .name -}} +{{- $name = . -}} +{{- end -}} +{{- else -}} +{{- $name = . -}} +{{- end -}} +{{- end -}} + +{{- printf "%s" $name -}} +{{- end -}} + +{{/* +Generate secret key. + +Usage: +{{ include "common.secrets.key" (dict "existingSecret" .Values.path.to.the.existingSecret "key" "keyName") }} + +Params: + - existingSecret - ExistingSecret/String - Optional. The path to the existing secrets in the values.yaml given by the user + to be used instead of the default one. Allows for it to be of type String (just the secret name) for backwards compatibility. + +info: https://github.com/bitnami/charts/tree/main/bitnami/common#existingsecret + - key - String - Required. Name of the key in the secret. +*/}} +{{- define "common.secrets.key" -}} +{{- $key := .key -}} + +{{- if .existingSecret -}} + {{- if not (typeIs "string" .existingSecret) -}} + {{- if .existingSecret.keyMapping -}} + {{- $key = index .existingSecret.keyMapping $.key -}} + {{- end -}} + {{- end }} +{{- end -}} + +{{- printf "%s" $key -}} +{{- end -}} + +{{/* +Generate secret password or retrieve one if already created. + +Usage: +{{ include "common.secrets.passwords.manage" (dict "secret" "secret-name" "key" "keyName" "providedValues" (list "path.to.password1" "path.to.password2") "length" 10 "strong" false "chartName" "chartName" "context" $) }} + +Params: + - secret - String - Required - Name of the 'Secret' resource where the password is stored. + - key - String - Required - Name of the key in the secret. + - providedValues - List - Required - The path to the validating value in the values.yaml, e.g: "mysql.password". Will pick first parameter with a defined value. + - length - int - Optional - Length of the generated random password. + - strong - Boolean - Optional - Whether to add symbols to the generated random password. + - chartName - String - Optional - Name of the chart used when said chart is deployed as a subchart. + - context - Context - Required - Parent context. + +The order in which this function returns a secret password: + 1. Already existing 'Secret' resource + (If a 'Secret' resource is found under the name provided to the 'secret' parameter to this function and that 'Secret' resource contains a key with the name passed as the 'key' parameter to this function then the value of this existing secret password will be returned) + 2. Password provided via the values.yaml + (If one of the keys passed to the 'providedValues' parameter to this function is a valid path to a key in the values.yaml and has a value, the value of the first key with a value will be returned) + 3. 
Randomly generated secret password + (A new random secret password with the length specified in the 'length' parameter will be generated and returned) + +*/}} +{{- define "common.secrets.passwords.manage" -}} + +{{- $password := "" }} +{{- $subchart := "" }} +{{- $chartName := default "" .chartName }} +{{- $passwordLength := default 10 .length }} +{{- $providedPasswordKey := include "common.utils.getKeyFromList" (dict "keys" .providedValues "context" $.context) }} +{{- $providedPasswordValue := include "common.utils.getValueFromKey" (dict "key" $providedPasswordKey "context" $.context) }} +{{- $secretData := (lookup "v1" "Secret" $.context.Release.Namespace .secret).data }} +{{- if $secretData }} + {{- if hasKey $secretData .key }} + {{- $password = index $secretData .key | quote }} + {{- else }} + {{- printf "\nPASSWORDS ERROR: The secret \"%s\" does not contain the key \"%s\"\n" .secret .key | fail -}} + {{- end -}} +{{- else if $providedPasswordValue }} + {{- $password = $providedPasswordValue | toString | b64enc | quote }} +{{- else }} + + {{- if .context.Values.enabled }} + {{- $subchart = $chartName }} + {{- end -}} + + {{- $requiredPassword := dict "valueKey" $providedPasswordKey "secret" .secret "field" .key "subchart" $subchart "context" $.context -}} + {{- $requiredPasswordError := include "common.validations.values.single.empty" $requiredPassword -}} + {{- $passwordValidationErrors := list $requiredPasswordError -}} + {{- include "common.errors.upgrade.passwords.empty" (dict "validationErrors" $passwordValidationErrors "context" $.context) -}} + + {{- if .strong }} + {{- $subStr := list (lower (randAlpha 1)) (randNumeric 1) (upper (randAlpha 1)) | join "_" }} + {{- $password = randAscii $passwordLength }} + {{- $password = regexReplaceAllLiteral "\\W" $password "@" | substr 5 $passwordLength }} + {{- $password = printf "%s%s" $subStr $password | toString | shuffle | b64enc | quote }} + {{- else }} + {{- $password = randAlphaNum $passwordLength | b64enc | quote }} + {{- end }} +{{- end -}} +{{- printf "%s" $password -}} +{{- end -}} + +{{/* +Reuses the value from an existing secret, otherwise sets its value to a default value. + +Usage: +{{ include "common.secrets.lookup" (dict "secret" "secret-name" "key" "keyName" "defaultValue" .Values.myValue "context" $) }} + +Params: + - secret - String - Required - Name of the 'Secret' resource where the password is stored. + - key - String - Required - Name of the key in the secret. + - defaultValue - String - Required - The path to the validating value in the values.yaml, e.g: "mysql.password". Will pick first parameter with a defined value. + - context - Context - Required - Parent context. + +*/}} +{{- define "common.secrets.lookup" -}} +{{- $value := "" -}} +{{- $defaultValue := required "\n'common.secrets.lookup': Argument 'defaultValue' missing or empty" .defaultValue -}} +{{- $secretData := (lookup "v1" "Secret" $.context.Release.Namespace .secret).data -}} +{{- if and $secretData (hasKey $secretData .key) -}} + {{- $value = index $secretData .key -}} +{{- else -}} + {{- $value = $defaultValue | toString | b64enc -}} +{{- end -}} +{{- printf "%s" $value -}} +{{- end -}} + +{{/* +Returns whether a previous generated secret already exists + +Usage: +{{ include "common.secrets.exists" (dict "secret" "secret-name" "context" $) }} + +Params: + - secret - String - Required - Name of the 'Secret' resource where the password is stored. + - context - Context - Required - Parent context. 
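+
+Illustrative sketch (release/secret name assumed): the helper prints "true" only when the
+lookup finds the Secret, so it is normally used as a condition, e.g.
+  {{- if (include "common.secrets.exists" (dict "secret" "my-release-rabbitmq" "context" $)) }}
+Keep in mind that `lookup` returns no data under `helm template` or `--dry-run`, so the
+check evaluates to empty in those modes.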
+*/}} +{{- define "common.secrets.exists" -}} +{{- $secret := (lookup "v1" "Secret" $.context.Release.Namespace .secret) }} +{{- if $secret }} + {{- true -}} +{{- end -}} +{{- end -}} diff --git a/charts/modules/rabbitmq/charts/common/templates/_storage.tpl b/charts/modules/rabbitmq/charts/common/templates/_storage.tpl new file mode 100644 index 0000000..60e2a84 --- /dev/null +++ b/charts/modules/rabbitmq/charts/common/templates/_storage.tpl @@ -0,0 +1,23 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Return the proper Storage Class +{{ include "common.storage.class" ( dict "persistence" .Values.path.to.the.persistence "global" $) }} +*/}} +{{- define "common.storage.class" -}} + +{{- $storageClass := .persistence.storageClass -}} +{{- if .global -}} + {{- if .global.storageClass -}} + {{- $storageClass = .global.storageClass -}} + {{- end -}} +{{- end -}} + +{{- if $storageClass -}} + {{- if (eq "-" $storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" $storageClass -}} + {{- end -}} +{{- end -}} + +{{- end -}} diff --git a/charts/modules/rabbitmq/charts/common/templates/_tplvalues.tpl b/charts/modules/rabbitmq/charts/common/templates/_tplvalues.tpl new file mode 100644 index 0000000..2db1668 --- /dev/null +++ b/charts/modules/rabbitmq/charts/common/templates/_tplvalues.tpl @@ -0,0 +1,13 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Renders a value that contains template. +Usage: +{{ include "common.tplvalues.render" ( dict "value" .Values.path.to.the.Value "context" $) }} +*/}} +{{- define "common.tplvalues.render" -}} + {{- if typeIs "string" .value }} + {{- tpl .value .context }} + {{- else }} + {{- tpl (.value | toYaml) .context }} + {{- end }} +{{- end -}} diff --git a/charts/modules/rabbitmq/charts/common/templates/_utils.tpl b/charts/modules/rabbitmq/charts/common/templates/_utils.tpl new file mode 100644 index 0000000..8c22b2a --- /dev/null +++ b/charts/modules/rabbitmq/charts/common/templates/_utils.tpl @@ -0,0 +1,62 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Print instructions to get a secret value. +Usage: +{{ include "common.utils.secret.getvalue" (dict "secret" "secret-name" "field" "secret-value-field" "context" $) }} +*/}} +{{- define "common.utils.secret.getvalue" -}} +{{- $varname := include "common.utils.fieldToEnvVar" . -}} +export {{ $varname }}=$(kubectl get secret --namespace {{ .context.Release.Namespace | quote }} {{ .secret }} -o jsonpath="{.data.{{ .field }}}" | base64 -d) +{{- end -}} + +{{/* +Build env var name given a field +Usage: +{{ include "common.utils.fieldToEnvVar" dict "field" "my-password" }} +*/}} +{{- define "common.utils.fieldToEnvVar" -}} + {{- $fieldNameSplit := splitList "-" .field -}} + {{- $upperCaseFieldNameSplit := list -}} + + {{- range $fieldNameSplit -}} + {{- $upperCaseFieldNameSplit = append $upperCaseFieldNameSplit ( upper . ) -}} + {{- end -}} + + {{ join "_" $upperCaseFieldNameSplit }} +{{- end -}} + +{{/* +Gets a value from .Values given +Usage: +{{ include "common.utils.getValueFromKey" (dict "key" "path.to.key" "context" $) }} +*/}} +{{- define "common.utils.getValueFromKey" -}} +{{- $splitKey := splitList "." .key -}} +{{- $value := "" -}} +{{- $latestObj := $.context.Values -}} +{{- range $splitKey -}} + {{- if not $latestObj -}} + {{- printf "please review the entire path of '%s' exists in values" $.key | fail -}} + {{- end -}} + {{- $value = ( index $latestObj . 
) -}} + {{- $latestObj = $value -}} +{{- end -}} +{{- printf "%v" (default "" $value) -}} +{{- end -}} + +{{/* +Returns first .Values key with a defined value or first of the list if all non-defined +Usage: +{{ include "common.utils.getKeyFromList" (dict "keys" (list "path.to.key1" "path.to.key2") "context" $) }} +*/}} +{{- define "common.utils.getKeyFromList" -}} +{{- $key := first .keys -}} +{{- $reverseKeys := reverse .keys }} +{{- range $reverseKeys }} + {{- $value := include "common.utils.getValueFromKey" (dict "key" . "context" $.context ) }} + {{- if $value -}} + {{- $key = . }} + {{- end -}} +{{- end -}} +{{- printf "%s" $key -}} +{{- end -}} diff --git a/charts/modules/rabbitmq/charts/common/templates/_warnings.tpl b/charts/modules/rabbitmq/charts/common/templates/_warnings.tpl new file mode 100644 index 0000000..ae10fa4 --- /dev/null +++ b/charts/modules/rabbitmq/charts/common/templates/_warnings.tpl @@ -0,0 +1,14 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Warning about using rolling tag. +Usage: +{{ include "common.warnings.rollingTag" .Values.path.to.the.imageRoot }} +*/}} +{{- define "common.warnings.rollingTag" -}} + +{{- if and (contains "bitnami/" .repository) (not (.tag | toString | regexFind "-r\\d+$|sha256:")) }} +WARNING: Rolling tag detected ({{ .repository }}:{{ .tag }}), please note that it is strongly recommended to avoid using rolling tags in a production environment. ++info https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/ +{{- end }} + +{{- end -}} diff --git a/charts/modules/rabbitmq/charts/common/templates/validations/_cassandra.tpl b/charts/modules/rabbitmq/charts/common/templates/validations/_cassandra.tpl new file mode 100644 index 0000000..ded1ae3 --- /dev/null +++ b/charts/modules/rabbitmq/charts/common/templates/validations/_cassandra.tpl @@ -0,0 +1,72 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate Cassandra required passwords are not empty. + +Usage: +{{ include "common.validations.values.cassandra.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where Cassandra values are stored, e.g: "cassandra-passwords-secret" + - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.cassandra.passwords" -}} + {{- $existingSecret := include "common.cassandra.values.existingSecret" . -}} + {{- $enabled := include "common.cassandra.values.enabled" . -}} + {{- $dbUserPrefix := include "common.cassandra.values.key.dbUser" . -}} + {{- $valueKeyPassword := printf "%s.password" $dbUserPrefix -}} + + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "cassandra-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.cassandra.values.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. 
Default: false +*/}} +{{- define "common.cassandra.values.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.cassandra.dbUser.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.dbUser.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled cassandra. + +Usage: +{{ include "common.cassandra.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.cassandra.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.cassandra.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key dbUser + +Usage: +{{ include "common.cassandra.values.key.dbUser" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false +*/}} +{{- define "common.cassandra.values.key.dbUser" -}} + {{- if .subchart -}} + cassandra.dbUser + {{- else -}} + dbUser + {{- end -}} +{{- end -}} diff --git a/charts/modules/rabbitmq/charts/common/templates/validations/_mariadb.tpl b/charts/modules/rabbitmq/charts/common/templates/validations/_mariadb.tpl new file mode 100644 index 0000000..b6906ff --- /dev/null +++ b/charts/modules/rabbitmq/charts/common/templates/validations/_mariadb.tpl @@ -0,0 +1,103 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate MariaDB required passwords are not empty. + +Usage: +{{ include "common.validations.values.mariadb.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where MariaDB values are stored, e.g: "mysql-passwords-secret" + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.mariadb.passwords" -}} + {{- $existingSecret := include "common.mariadb.values.auth.existingSecret" . -}} + {{- $enabled := include "common.mariadb.values.enabled" . -}} + {{- $architecture := include "common.mariadb.values.architecture" . -}} + {{- $authPrefix := include "common.mariadb.values.key.auth" . 
-}} + {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}} + {{- $valueKeyUsername := printf "%s.username" $authPrefix -}} + {{- $valueKeyPassword := printf "%s.password" $authPrefix -}} + {{- $valueKeyReplicationPassword := printf "%s.replicationPassword" $authPrefix -}} + + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mariadb-root-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}} + + {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }} + {{- if not (empty $valueUsername) -}} + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mariadb-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + {{- end -}} + + {{- if (eq $architecture "replication") -}} + {{- $requiredReplicationPassword := dict "valueKey" $valueKeyReplicationPassword "secret" .secret "field" "mariadb-replication-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredReplicationPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.mariadb.values.auth.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.mariadb.values.auth.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.mariadb.auth.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.auth.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled mariadb. + +Usage: +{{ include "common.mariadb.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.mariadb.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.mariadb.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for architecture + +Usage: +{{ include "common.mariadb.values.architecture" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.mariadb.values.architecture" -}} + {{- if .subchart -}} + {{- .context.Values.mariadb.architecture -}} + {{- else -}} + {{- .context.Values.architecture -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key auth + +Usage: +{{ include "common.mariadb.values.key.auth" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. 
Default: false +*/}} +{{- define "common.mariadb.values.key.auth" -}} + {{- if .subchart -}} + mariadb.auth + {{- else -}} + auth + {{- end -}} +{{- end -}} diff --git a/charts/modules/rabbitmq/charts/common/templates/validations/_mongodb.tpl b/charts/modules/rabbitmq/charts/common/templates/validations/_mongodb.tpl new file mode 100644 index 0000000..f820ec1 --- /dev/null +++ b/charts/modules/rabbitmq/charts/common/templates/validations/_mongodb.tpl @@ -0,0 +1,108 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate MongoDB® required passwords are not empty. + +Usage: +{{ include "common.validations.values.mongodb.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where MongoDB® values are stored, e.g: "mongodb-passwords-secret" + - subchart - Boolean - Optional. Whether MongoDB® is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.mongodb.passwords" -}} + {{- $existingSecret := include "common.mongodb.values.auth.existingSecret" . -}} + {{- $enabled := include "common.mongodb.values.enabled" . -}} + {{- $authPrefix := include "common.mongodb.values.key.auth" . -}} + {{- $architecture := include "common.mongodb.values.architecture" . -}} + {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}} + {{- $valueKeyUsername := printf "%s.username" $authPrefix -}} + {{- $valueKeyDatabase := printf "%s.database" $authPrefix -}} + {{- $valueKeyPassword := printf "%s.password" $authPrefix -}} + {{- $valueKeyReplicaSetKey := printf "%s.replicaSetKey" $authPrefix -}} + {{- $valueKeyAuthEnabled := printf "%s.enabled" $authPrefix -}} + + {{- $authEnabled := include "common.utils.getValueFromKey" (dict "key" $valueKeyAuthEnabled "context" .context) -}} + + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") (eq $authEnabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mongodb-root-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}} + + {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }} + {{- $valueDatabase := include "common.utils.getValueFromKey" (dict "key" $valueKeyDatabase "context" .context) }} + {{- if and $valueUsername $valueDatabase -}} + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mongodb-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + {{- end -}} + + {{- if (eq $architecture "replicaset") -}} + {{- $requiredReplicaSetKey := dict "valueKey" $valueKeyReplicaSetKey "secret" .secret "field" "mongodb-replica-set-key" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredReplicaSetKey -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.mongodb.values.auth.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MongoDb is used as subchart or not. 
Default: false +*/}} +{{- define "common.mongodb.values.auth.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.mongodb.auth.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.auth.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled mongodb. + +Usage: +{{ include "common.mongodb.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.mongodb.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.mongodb.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key auth + +Usage: +{{ include "common.mongodb.values.key.auth" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MongoDB® is used as subchart or not. Default: false +*/}} +{{- define "common.mongodb.values.key.auth" -}} + {{- if .subchart -}} + mongodb.auth + {{- else -}} + auth + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for architecture + +Usage: +{{ include "common.mongodb.values.architecture" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MongoDB® is used as subchart or not. Default: false +*/}} +{{- define "common.mongodb.values.architecture" -}} + {{- if .subchart -}} + {{- .context.Values.mongodb.architecture -}} + {{- else -}} + {{- .context.Values.architecture -}} + {{- end -}} +{{- end -}} diff --git a/charts/modules/rabbitmq/charts/common/templates/validations/_mysql.tpl b/charts/modules/rabbitmq/charts/common/templates/validations/_mysql.tpl new file mode 100644 index 0000000..74472a0 --- /dev/null +++ b/charts/modules/rabbitmq/charts/common/templates/validations/_mysql.tpl @@ -0,0 +1,103 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate MySQL required passwords are not empty. + +Usage: +{{ include "common.validations.values.mysql.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where MySQL values are stored, e.g: "mysql-passwords-secret" + - subchart - Boolean - Optional. Whether MySQL is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.mysql.passwords" -}} + {{- $existingSecret := include "common.mysql.values.auth.existingSecret" . -}} + {{- $enabled := include "common.mysql.values.enabled" . -}} + {{- $architecture := include "common.mysql.values.architecture" . -}} + {{- $authPrefix := include "common.mysql.values.key.auth" . 
-}} + {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}} + {{- $valueKeyUsername := printf "%s.username" $authPrefix -}} + {{- $valueKeyPassword := printf "%s.password" $authPrefix -}} + {{- $valueKeyReplicationPassword := printf "%s.replicationPassword" $authPrefix -}} + + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mysql-root-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}} + + {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }} + {{- if not (empty $valueUsername) -}} + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mysql-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + {{- end -}} + + {{- if (eq $architecture "replication") -}} + {{- $requiredReplicationPassword := dict "valueKey" $valueKeyReplicationPassword "secret" .secret "field" "mysql-replication-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredReplicationPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.mysql.values.auth.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MySQL is used as subchart or not. Default: false +*/}} +{{- define "common.mysql.values.auth.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.mysql.auth.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.auth.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled mysql. + +Usage: +{{ include "common.mysql.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.mysql.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.mysql.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for architecture + +Usage: +{{ include "common.mysql.values.architecture" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MySQL is used as subchart or not. Default: false +*/}} +{{- define "common.mysql.values.architecture" -}} + {{- if .subchart -}} + {{- .context.Values.mysql.architecture -}} + {{- else -}} + {{- .context.Values.architecture -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key auth + +Usage: +{{ include "common.mysql.values.key.auth" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MySQL is used as subchart or not. 
Default: false +*/}} +{{- define "common.mysql.values.key.auth" -}} + {{- if .subchart -}} + mysql.auth + {{- else -}} + auth + {{- end -}} +{{- end -}} diff --git a/charts/modules/rabbitmq/charts/common/templates/validations/_postgresql.tpl b/charts/modules/rabbitmq/charts/common/templates/validations/_postgresql.tpl new file mode 100644 index 0000000..164ec0d --- /dev/null +++ b/charts/modules/rabbitmq/charts/common/templates/validations/_postgresql.tpl @@ -0,0 +1,129 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate PostgreSQL required passwords are not empty. + +Usage: +{{ include "common.validations.values.postgresql.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where postgresql values are stored, e.g: "postgresql-passwords-secret" + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.postgresql.passwords" -}} + {{- $existingSecret := include "common.postgresql.values.existingSecret" . -}} + {{- $enabled := include "common.postgresql.values.enabled" . -}} + {{- $valueKeyPostgresqlPassword := include "common.postgresql.values.key.postgressPassword" . -}} + {{- $valueKeyPostgresqlReplicationEnabled := include "common.postgresql.values.key.replicationPassword" . -}} + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + {{- $requiredPostgresqlPassword := dict "valueKey" $valueKeyPostgresqlPassword "secret" .secret "field" "postgresql-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPostgresqlPassword -}} + + {{- $enabledReplication := include "common.postgresql.values.enabled.replication" . -}} + {{- if (eq $enabledReplication "true") -}} + {{- $requiredPostgresqlReplicationPassword := dict "valueKey" $valueKeyPostgresqlReplicationEnabled "secret" .secret "field" "postgresql-replication-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPostgresqlReplicationPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to decide whether evaluate global values. + +Usage: +{{ include "common.postgresql.values.use.global" (dict "key" "key-of-global" "context" $) }} +Params: + - key - String - Required. Field to be evaluated within global, e.g: "existingSecret" +*/}} +{{- define "common.postgresql.values.use.global" -}} + {{- if .context.Values.global -}} + {{- if .context.Values.global.postgresql -}} + {{- index .context.Values.global.postgresql .key | quote -}} + {{- end -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.postgresql.values.existingSecret" (dict "context" $) }} +*/}} +{{- define "common.postgresql.values.existingSecret" -}} + {{- $globalValue := include "common.postgresql.values.use.global" (dict "key" "existingSecret" "context" .context) -}} + + {{- if .subchart -}} + {{- default (.context.Values.postgresql.existingSecret | quote) $globalValue -}} + {{- else -}} + {{- default (.context.Values.existingSecret | quote) $globalValue -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled postgresql. 
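+For illustration: when called with "subchart" set to true this reads
+.Values.postgresql.enabled from the parent chart; otherwise it returns the negation of
+.Values.enabled, following the same convention as the other database validators in this
+directory.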
+ +Usage: +{{ include "common.postgresql.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.postgresql.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.postgresql.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key postgressPassword. + +Usage: +{{ include "common.postgresql.values.key.postgressPassword" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.postgresql.values.key.postgressPassword" -}} + {{- $globalValue := include "common.postgresql.values.use.global" (dict "key" "postgresqlUsername" "context" .context) -}} + + {{- if not $globalValue -}} + {{- if .subchart -}} + postgresql.postgresqlPassword + {{- else -}} + postgresqlPassword + {{- end -}} + {{- else -}} + global.postgresql.postgresqlPassword + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled.replication. + +Usage: +{{ include "common.postgresql.values.enabled.replication" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.postgresql.values.enabled.replication" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.postgresql.replication.enabled -}} + {{- else -}} + {{- printf "%v" .context.Values.replication.enabled -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key replication.password. + +Usage: +{{ include "common.postgresql.values.key.replicationPassword" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.postgresql.values.key.replicationPassword" -}} + {{- if .subchart -}} + postgresql.replication.password + {{- else -}} + replication.password + {{- end -}} +{{- end -}} diff --git a/charts/modules/rabbitmq/charts/common/templates/validations/_redis.tpl b/charts/modules/rabbitmq/charts/common/templates/validations/_redis.tpl new file mode 100644 index 0000000..dcccfc1 --- /dev/null +++ b/charts/modules/rabbitmq/charts/common/templates/validations/_redis.tpl @@ -0,0 +1,76 @@ + +{{/* vim: set filetype=mustache: */}} +{{/* +Validate Redis® required passwords are not empty. + +Usage: +{{ include "common.validations.values.redis.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where redis values are stored, e.g: "redis-passwords-secret" + - subchart - Boolean - Optional. Whether redis is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.redis.passwords" -}} + {{- $enabled := include "common.redis.values.enabled" . -}} + {{- $valueKeyPrefix := include "common.redis.values.keys.prefix" . -}} + {{- $standarizedVersion := include "common.redis.values.standarized.version" . 
}} + + {{- $existingSecret := ternary (printf "%s%s" $valueKeyPrefix "auth.existingSecret") (printf "%s%s" $valueKeyPrefix "existingSecret") (eq $standarizedVersion "true") }} + {{- $existingSecretValue := include "common.utils.getValueFromKey" (dict "key" $existingSecret "context" .context) }} + + {{- $valueKeyRedisPassword := ternary (printf "%s%s" $valueKeyPrefix "auth.password") (printf "%s%s" $valueKeyPrefix "password") (eq $standarizedVersion "true") }} + {{- $valueKeyRedisUseAuth := ternary (printf "%s%s" $valueKeyPrefix "auth.enabled") (printf "%s%s" $valueKeyPrefix "usePassword") (eq $standarizedVersion "true") }} + + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $useAuth := include "common.utils.getValueFromKey" (dict "key" $valueKeyRedisUseAuth "context" .context) -}} + {{- if eq $useAuth "true" -}} + {{- $requiredRedisPassword := dict "valueKey" $valueKeyRedisPassword "secret" .secret "field" "redis-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRedisPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled redis. + +Usage: +{{ include "common.redis.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.redis.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.redis.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right prefix path for the values + +Usage: +{{ include "common.redis.values.key.prefix" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether redis is used as subchart or not. Default: false +*/}} +{{- define "common.redis.values.keys.prefix" -}} + {{- if .subchart -}}redis.{{- else -}}{{- end -}} +{{- end -}} + +{{/* +Checks whether the redis chart's includes the standarizations (version >= 14) + +Usage: +{{ include "common.redis.values.standarized.version" (dict "context" $) }} +*/}} +{{- define "common.redis.values.standarized.version" -}} + + {{- $standarizedAuth := printf "%s%s" (include "common.redis.values.keys.prefix" .) "auth" -}} + {{- $standarizedAuthValues := include "common.utils.getValueFromKey" (dict "key" $standarizedAuth "context" .context) }} + + {{- if $standarizedAuthValues -}} + {{- true -}} + {{- end -}} +{{- end -}} diff --git a/charts/modules/rabbitmq/charts/common/templates/validations/_validations.tpl b/charts/modules/rabbitmq/charts/common/templates/validations/_validations.tpl new file mode 100644 index 0000000..9a814cf --- /dev/null +++ b/charts/modules/rabbitmq/charts/common/templates/validations/_validations.tpl @@ -0,0 +1,46 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate values must not be empty. + +Usage: +{{- $validateValueConf00 := (dict "valueKey" "path.to.value" "secret" "secretName" "field" "password-00") -}} +{{- $validateValueConf01 := (dict "valueKey" "path.to.value" "secret" "secretName" "field" "password-01") -}} +{{ include "common.validations.values.empty" (dict "required" (list $validateValueConf00 $validateValueConf01) "context" $) }} + +Validate value params: + - valueKey - String - Required. The path to the validating value in the values.yaml, e.g: "mysql.password" + - secret - String - Optional. 
Name of the secret where the validating value is generated/stored, e.g: "mysql-passwords-secret" + - field - String - Optional. Name of the field in the secret data, e.g: "mysql-password" +*/}} +{{- define "common.validations.values.multiple.empty" -}} + {{- range .required -}} + {{- include "common.validations.values.single.empty" (dict "valueKey" .valueKey "secret" .secret "field" .field "context" $.context) -}} + {{- end -}} +{{- end -}} + +{{/* +Validate a value must not be empty. + +Usage: +{{ include "common.validations.value.empty" (dict "valueKey" "mariadb.password" "secret" "secretName" "field" "my-password" "subchart" "subchart" "context" $) }} + +Validate value params: + - valueKey - String - Required. The path to the validating value in the values.yaml, e.g: "mysql.password" + - secret - String - Optional. Name of the secret where the validating value is generated/stored, e.g: "mysql-passwords-secret" + - field - String - Optional. Name of the field in the secret data, e.g: "mysql-password" + - subchart - String - Optional - Name of the subchart that the validated password is part of. +*/}} +{{- define "common.validations.values.single.empty" -}} + {{- $value := include "common.utils.getValueFromKey" (dict "key" .valueKey "context" .context) }} + {{- $subchart := ternary "" (printf "%s." .subchart) (empty .subchart) }} + + {{- if not $value -}} + {{- $varname := "my-value" -}} + {{- $getCurrentValue := "" -}} + {{- if and .secret .field -}} + {{- $varname = include "common.utils.fieldToEnvVar" . -}} + {{- $getCurrentValue = printf " To get the current value:\n\n %s\n" (include "common.utils.secret.getvalue" .) -}} + {{- end -}} + {{- printf "\n '%s' must not be empty, please add '--set %s%s=$%s' to the command.%s" .valueKey $subchart .valueKey $varname $getCurrentValue -}} + {{- end -}} +{{- end -}} diff --git a/charts/modules/rabbitmq/charts/common/values.yaml b/charts/modules/rabbitmq/charts/common/values.yaml new file mode 100644 index 0000000..f2df68e --- /dev/null +++ b/charts/modules/rabbitmq/charts/common/values.yaml @@ -0,0 +1,5 @@ +## bitnami/common +## It is required by CI/CD tools and processes. +## @skip exampleValue +## +exampleValue: common-chart diff --git a/charts/modules/rabbitmq/templates/NOTES.txt b/charts/modules/rabbitmq/templates/NOTES.txt new file mode 100644 index 0000000..2d26bce --- /dev/null +++ b/charts/modules/rabbitmq/templates/NOTES.txt @@ -0,0 +1,154 @@ +CHART NAME: {{ .Chart.Name }} +CHART VERSION: {{ .Chart.Version }} +APP VERSION: {{ .Chart.AppVersion }} + +{{- $servicePort := or (.Values.service.portEnabled) (not .Values.auth.tls.enabled) | ternary .Values.service.ports.amqp .Values.service.ports.amqpTls -}} + +** Please be patient while the chart is being deployed ** + +{{- if .Values.diagnosticMode.enabled }} +The chart has been deployed in diagnostic mode. 
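+(Diagnostic mode is governed by the diagnosticMode.* values referenced below; passing
+--set diagnosticMode.enabled=true at install time is an illustrative way to switch it on.)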
+All probes have been disabled and the command has been overwritten with:
+
+    command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 4 }}
+    args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 4 }}
+
+Get the list of pods by executing:
+
+    kubectl get pods --namespace {{ .Release.Namespace }} -l app.kubernetes.io/instance={{ .Release.Name }}
+
+Access the pod you want to debug by executing:
+
+    kubectl exec --namespace {{ .Release.Namespace }} -ti <name-of-the-pod> -- bash
+
+In order to replicate the container startup scripts, execute this command:
+
+    /opt/bitnami/scripts/rabbitmq/entrypoint.sh /opt/bitnami/scripts/rabbitmq/run.sh
+
+{{- else }}
+
+Credentials:
+
+{{- if not .Values.loadDefinition.enabled }}
+    echo "Username : {{ .Values.auth.username }}"
+    echo "Password : $(kubectl get secret --namespace {{ .Release.Namespace }} {{ include "rabbitmq.secretPasswordName" . }} -o jsonpath="{.data.rabbitmq-password}" | base64 -d)"
+{{- end }}
+    echo "ErLang Cookie : $(kubectl get secret --namespace {{ .Release.Namespace }} {{ include "rabbitmq.secretErlangName" . }} -o jsonpath="{.data.rabbitmq-erlang-cookie}" | base64 -d)"
+
+Note that the credentials are saved in persistent volume claims and will not be changed upon upgrade or reinstallation unless the persistent volume claim has been deleted. If this is not the first installation of this chart, the credentials may not be valid.
+This applies when no passwords are set and a random password is therefore autogenerated. If you use a fixed password, specify it when upgrading.
+More information about the credentials may be found at https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues/#credential-errors-while-upgrading-chart-releases.
+
+RabbitMQ can be accessed within the cluster on port {{ $servicePort }} at {{ include "common.names.fullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}
+
+To access from outside the cluster, perform the following steps:
+
+{{- if .Values.ingress.enabled }}
+{{- if contains "NodePort" .Values.service.type }}
+
+To Access the RabbitMQ AMQP port:
+
+1. Obtain the NodePort IP and ports:
+
+    export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
+    export NODE_PORT_AMQP=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[?(@.name=='amqp')].nodePort}" services {{ include "common.names.fullname" . }})
+    echo "URL : amqp://$NODE_IP:$NODE_PORT_AMQP/"
+
+{{- else if contains "LoadBalancer" .Values.service.type }}
+
+To Access the RabbitMQ AMQP port:
+
+1. Obtain the LoadBalancer IP:
+
+NOTE: It may take a few minutes for the LoadBalancer IP to be available.
+      Watch the status with: 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ include "common.names.fullname" . }}'
+
+    export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "common.names.fullname" . }} --template "{{ "{{ range (index .status.loadBalancer.ingress 0) }}{{ . }}{{ end }}" }}")
+    echo "URL : amqp://$SERVICE_IP:{{ $servicePort }}/"
+
+{{- else if contains "ClusterIP" .Values.service.type }}
+
+To Access the RabbitMQ AMQP port:
+
+1. Create a port-forward to the AMQP port:
+
+    kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ include "common.names.fullname" . }} {{ $servicePort }}:{{ $servicePort }} &
+    echo "URL : amqp://127.0.0.1:{{ $servicePort }}/"
+
+{{- end }}
+
+2. Access RabbitMQ using the obtained URL.
+
+To Access the RabbitMQ Management interface:
+
+1. Get the RabbitMQ Management URL and associate its hostname to your cluster external IP:
+
+    export CLUSTER_IP=$(minikube ip) # On Minikube. Use: `kubectl cluster-info` on other K8s clusters
+    echo "RabbitMQ Management: http{{ if .Values.ingress.tls }}s{{ end }}://{{ .Values.ingress.hostname }}/"
+    echo "$CLUSTER_IP {{ .Values.ingress.hostname }}" | sudo tee -a /etc/hosts
+
+2. Open a browser and access RabbitMQ Management using the obtained URL.
+
+{{- else }}
+{{- if contains "NodePort" .Values.service.type }}
+
+Obtain the NodePort IP and ports:
+
+    export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
+    export NODE_PORT_AMQP=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[?(@.name=='amqp')].nodePort}" services {{ include "common.names.fullname" . }})
+    export NODE_PORT_STATS=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[?(@.name=='http-stats')].nodePort}" services {{ include "common.names.fullname" . }})
+
+To Access the RabbitMQ AMQP port:
+
+    echo "URL : amqp://$NODE_IP:$NODE_PORT_AMQP/"
+
+To Access the RabbitMQ Management interface:
+
+    echo "URL : http://$NODE_IP:$NODE_PORT_STATS/"
+
+{{- else if contains "LoadBalancer" .Values.service.type }}
+
+Obtain the LoadBalancer IP:
+
+NOTE: It may take a few minutes for the LoadBalancer IP to be available.
+      Watch the status with: 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ include "common.names.fullname" . }}'
+
+    export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "common.names.fullname" . }} --template "{{ "{{ range (index .status.loadBalancer.ingress 0) }}{{ . }}{{ end }}" }}")
+
+To Access the RabbitMQ AMQP port:
+
+    echo "URL : amqp://$SERVICE_IP:{{ $servicePort }}/"
+
+To Access the RabbitMQ Management interface:
+
+    echo "URL : http://$SERVICE_IP:{{ .Values.service.ports.manager }}/"
+
+{{- else if contains "ClusterIP" .Values.service.type }}
+
+To Access the RabbitMQ AMQP port:
+
+    echo "URL : amqp://127.0.0.1:{{ $servicePort }}/"
+    kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ include "common.names.fullname" . }} {{ $servicePort }}:{{ $servicePort }}
+
+To Access the RabbitMQ Management interface:
+
+    echo "URL : http://127.0.0.1:{{ .Values.service.ports.manager }}/"
+    kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ include "common.names.fullname" . }} {{ .Values.service.ports.manager }}:{{ .Values.service.ports.manager }}
+
+{{- end }}
+{{- end }}
+
+{{- if .Values.metrics.enabled }}
+
+To access the RabbitMQ Prometheus metrics, get the RabbitMQ Prometheus URL by running:
+
+    kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ include "common.names.fullname" . }} {{ .Values.service.ports.metrics }}:{{ .Values.service.ports.metrics }} &
+    echo "Prometheus Metrics URL: http://127.0.0.1:{{ .Values.service.ports.metrics }}/metrics"
+
+Then, open the obtained URL in a browser.
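+
+As a quick check (illustrative command; any HTTP client will do), while the port-forward
+above is still running:
+
+    curl -s http://127.0.0.1:{{ .Values.service.ports.metrics }}/metrics | head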
+ +{{- end }} + +{{- include "common.warnings.rollingTag" .Values.image }} +{{- include "common.warnings.rollingTag" .Values.volumePermissions.image }} + +{{- end }} diff --git a/charts/modules/rabbitmq/templates/_helpers.tpl b/charts/modules/rabbitmq/templates/_helpers.tpl new file mode 100644 index 0000000..d1aa125 --- /dev/null +++ b/charts/modules/rabbitmq/templates/_helpers.tpl @@ -0,0 +1,227 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Return the proper RabbitMQ image name +*/}} +{{- define "rabbitmq.image" -}} +{{ include "common.images.image" (dict "imageRoot" .Values.image "global" .Values.global) }} +{{- end -}} + +{{/* +Return the proper image name (for the init container volume-permissions image) +*/}} +{{- define "rabbitmq.volumePermissions.image" -}} +{{ include "common.images.image" (dict "imageRoot" .Values.volumePermissions.image "global" .Values.global) }} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names +*/}} +{{- define "rabbitmq.imagePullSecrets" -}} +{{ include "common.images.pullSecrets" (dict "images" (list .Values.image .Values.volumePermissions.image) "global" .Values.global) }} +{{- end -}} + +{{/* + Create the name of the service account to use + */}} +{{- define "rabbitmq.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (include "common.names.fullname" .) .Values.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Get the password secret. +*/}} +{{- define "rabbitmq.secretPasswordName" -}} + {{- if .Values.auth.existingPasswordSecret -}} + {{- printf "%s" (tpl .Values.auth.existingPasswordSecret $) -}} + {{- else -}} + {{- printf "%s" (include "common.names.fullname" .) -}} + {{- end -}} +{{- end -}} + +{{/* +Get the erlang secret. +*/}} +{{- define "rabbitmq.secretErlangName" -}} + {{- if .Values.auth.existingErlangSecret -}} + {{- printf "%s" (tpl .Values.auth.existingErlangSecret $) -}} + {{- else -}} + {{- printf "%s" (include "common.names.fullname" .) -}} + {{- end -}} +{{- end -}} + +{{/* +Get the TLS secret. +*/}} +{{- define "rabbitmq.tlsSecretName" -}} + {{- if .Values.auth.tls.existingSecret -}} + {{- printf "%s" (tpl .Values.auth.tls.existingSecret $) -}} + {{- else -}} + {{- printf "%s-certs" (include "common.names.fullname" .) -}} + {{- end -}} +{{- end -}} + +{{/* +Return true if a TLS credentials secret object should be created +*/}} +{{- define "rabbitmq.createTlsSecret" -}} +{{- if and .Values.auth.tls.enabled (not .Values.auth.tls.existingSecret) }} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper RabbitMQ plugin list +*/}} +{{- define "rabbitmq.plugins" -}} +{{- $plugins := .Values.plugins -}} +{{- if .Values.extraPlugins -}} +{{- $plugins = printf "%s %s" $plugins .Values.extraPlugins -}} +{{- end -}} +{{- if .Values.metrics.enabled -}} +{{- $plugins = printf "%s %s" $plugins .Values.metrics.plugins -}} +{{- end -}} +{{- printf "%s" $plugins | replace " " ", " -}} +{{- end -}} + +{{/* +Return the number of bytes given a value +following a base 2 o base 10 number system. +Usage: +{{ include "rabbitmq.toBytes" .Values.path.to.the.Value }} +*/}} +{{- define "rabbitmq.toBytes" -}} +{{- $value := int (regexReplaceAll "([0-9]+).*" . "${1}") }} +{{- $unit := regexReplaceAll "[0-9]+(.*)" . 
"${1}" }} +{{- if eq $unit "Ki" }} + {{- mul $value 1024 }} +{{- else if eq $unit "Mi" }} + {{- mul $value 1024 1024 }} +{{- else if eq $unit "Gi" }} + {{- mul $value 1024 1024 1024 }} +{{- else if eq $unit "Ti" }} + {{- mul $value 1024 1024 1024 1024 }} +{{- else if eq $unit "Pi" }} + {{- mul $value 1024 1024 1024 1024 1024 }} +{{- else if eq $unit "Ei" }} + {{- mul $value 1024 1024 1024 1024 1024 1024 }} +{{- else if eq $unit "K" }} + {{- mul $value 1000 }} +{{- else if eq $unit "M" }} + {{- mul $value 1000 1000 }} +{{- else if eq $unit "G" }} + {{- mul $value 1000 1000 1000 }} +{{- else if eq $unit "T" }} + {{- mul $value 1000 1000 1000 1000 }} +{{- else if eq $unit "P" }} + {{- mul $value 1000 1000 1000 1000 1000 }} +{{- else if eq $unit "E" }} + {{- mul $value 1000 1000 1000 1000 1000 1000 }} +{{- end }} +{{- end -}} + +{{/* +Compile all warnings into a single message, and call fail. +*/}} +{{- define "rabbitmq.validateValues" -}} +{{- $messages := list -}} +{{- $messages := append $messages (include "rabbitmq.validateValues.ldap" .) -}} +{{- $messages := append $messages (include "rabbitmq.validateValues.memoryHighWatermark" .) -}} +{{- $messages := append $messages (include "rabbitmq.validateValues.ingress.tls" .) -}} +{{- $messages := append $messages (include "rabbitmq.validateValues.auth.tls" .) -}} +{{- $messages := without $messages "" -}} +{{- $message := join "\n" $messages -}} + +{{- if $message -}} +{{- printf "\nVALUES VALIDATION:\n%s" $message | fail -}} +{{- end -}} +{{- end -}} + +{{/* +Validate values of rabbitmq - LDAP support +*/}} +{{- define "rabbitmq.validateValues.ldap" -}} +{{- if .Values.ldap.enabled }} +{{- $serversListLength := len .Values.ldap.servers }} +{{- $userDnPattern := coalesce .Values.ldap.user_dn_pattern .Values.ldap.userDnPattern }} +{{- if or (and (not (gt $serversListLength 0)) (empty .Values.ldap.uri)) (and (not $userDnPattern) (not .Values.ldap.basedn)) }} +rabbitmq: LDAP + Invalid LDAP configuration. When enabling LDAP support, the parameters "ldap.servers" or "ldap.uri" are mandatory + to configure the connection and "ldap.userDnPattern" or "ldap.basedn" are necessary to lookup the users. Please provide them: + $ helm install {{ .Release.Name }} my-repo/rabbitmq \ + --set ldap.enabled=true \ + --set ldap.servers[0]=my-ldap-server" \ + --set ldap.port="389" \ + --set ldap.userDnPattern="cn=${username},dc=example,dc=org" +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Validate values of rabbitmq - Memory high watermark +*/}} +{{- define "rabbitmq.validateValues.memoryHighWatermark" -}} +{{- if and (not (eq .Values.memoryHighWatermark.type "absolute")) (not (eq .Values.memoryHighWatermark.type "relative")) }} +rabbitmq: memoryHighWatermark.type + Invalid Memory high watermark type. Valid values are "absolute" and + "relative". Please set a valid mode (--set memoryHighWatermark.type="xxxx") +{{- else if and .Values.memoryHighWatermark.enabled (not .Values.resources.limits.memory) (eq .Values.memoryHighWatermark.type "relative") }} +rabbitmq: memoryHighWatermark + You enabled configuring memory high watermark using a relative limit. However, + no memory limits were defined at POD level. 
Define your POD limits as shown below:
+
+    $ helm install {{ .Release.Name }} my-repo/rabbitmq \
+      --set memoryHighWatermark.enabled=true \
+      --set memoryHighWatermark.type="relative" \
+      --set memoryHighWatermark.value="0.4" \
+      --set resources.limits.memory="2Gi"
+
+    Alternatively, use an absolute value for the memory high watermark:
+
+    $ helm install {{ .Release.Name }} my-repo/rabbitmq \
+      --set memoryHighWatermark.enabled=true \
+      --set memoryHighWatermark.type="absolute" \
+      --set memoryHighWatermark.value="512MB"
+{{- end -}}
+{{- end -}}
+
+{{/*
+Validate values of rabbitmq - TLS configuration for Ingress
+*/}}
+{{- define "rabbitmq.validateValues.ingress.tls" -}}
+{{- if and .Values.ingress.enabled .Values.ingress.tls (not (include "common.ingress.certManagerRequest" ( dict "annotations" .Values.ingress.annotations ))) (not .Values.ingress.selfSigned) (empty .Values.ingress.extraTls) }}
+rabbitmq: ingress.tls
+    You enabled the TLS configuration for the default ingress hostname but
+    you did not enable any of the available mechanisms to create the TLS secret
+    to be used by the Ingress Controller.
+    Please use any of these alternatives:
+      - Use the `ingress.extraTls` and `ingress.secrets` parameters to provide your custom TLS certificates.
+      - Rely on cert-manager to create it by setting the corresponding annotations
+      - Rely on Helm to create self-signed certificates by setting `ingress.selfSigned=true`
+{{- end -}}
+{{- end -}}
+
+{{/*
+Validate values of RabbitMQ - Auth TLS enabled
+*/}}
+{{- define "rabbitmq.validateValues.auth.tls" -}}
+{{- if and .Values.auth.tls.enabled (not .Values.auth.tls.autoGenerated) (not .Values.auth.tls.existingSecret) (not .Values.auth.tls.caCertificate) (not .Values.auth.tls.serverCertificate) (not .Values.auth.tls.serverKey) }}
+rabbitmq: auth.tls
+    You enabled TLS for RabbitMQ but you did not enable any of the available mechanisms to create the TLS secret.
+    Please use any of these alternatives:
+      - Provide an existing secret containing the TLS certificates using `auth.tls.existingSecret`
+      - Provide the plain text certificates using `auth.tls.caCertificate`, `auth.tls.serverCertificate` and `auth.tls.serverKey`.
+      - Enable auto-generated certificates using `auth.tls.autoGenerated`.
+{{- end -}}
+{{- end -}}
+
+{{/*
+Get the initialization scripts volume name.
+*/}}
+{{- define "rabbitmq.initScripts" -}}
+{{- printf "%s-init-scripts" (include "common.names.fullname" .) -}}
+{{- end -}}
diff --git a/charts/modules/rabbitmq/templates/config-secret.yaml b/charts/modules/rabbitmq/templates/config-secret.yaml
new file mode 100644
index 0000000..80e93e6
--- /dev/null
+++ b/charts/modules/rabbitmq/templates/config-secret.yaml
@@ -0,0 +1,20 @@
+apiVersion: v1
+kind: Secret
+metadata:
+  name: {{ printf "%s-config" (include "common.names.fullname" .) }}
+  namespace: {{ include "common.names.namespace" . | quote }}
+  labels: {{- include "common.labels.standard" . 
| nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: Opaque +data: + rabbitmq.conf: |- + {{- include "common.tplvalues.render" (dict "value" .Values.configuration "context" $) | b64enc | nindent 4 }} + {{- if .Values.advancedConfiguration }} + advanced.config: |- + {{- include "common.tplvalues.render" (dict "value" .Values.advancedConfiguration "context" $) | b64enc | nindent 4 }} + {{- end }} diff --git a/charts/modules/rabbitmq/templates/extra-list.yaml b/charts/modules/rabbitmq/templates/extra-list.yaml new file mode 100644 index 0000000..9ac65f9 --- /dev/null +++ b/charts/modules/rabbitmq/templates/extra-list.yaml @@ -0,0 +1,4 @@ +{{- range .Values.extraDeploy }} +--- +{{ include "common.tplvalues.render" (dict "value" . "context" $) }} +{{- end }} diff --git a/charts/modules/rabbitmq/templates/ingress.yaml b/charts/modules/rabbitmq/templates/ingress.yaml new file mode 100644 index 0000000..2f23b10 --- /dev/null +++ b/charts/modules/rabbitmq/templates/ingress.yaml @@ -0,0 +1,64 @@ +{{- if .Values.ingress.enabled }} +apiVersion: {{ include "common.capabilities.ingress.apiVersion" . }} +kind: Ingress +metadata: + name: {{ include "common.names.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + annotations: + {{- if .Values.ingress.annotations }} + {{- include "common.tplvalues.render" (dict "value" .Values.ingress.annotations "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + {{- if and .Values.ingress.ingressClassName (eq "true" (include "common.ingress.supportsIngressClassname" .)) }} + ingressClassName: {{ .Values.ingress.ingressClassName | quote }} + {{- end }} + rules: + {{- if .Values.ingress.hostname }} + - host: {{ include "common.tplvalues.render" ( dict "value" .Values.ingress.hostname "context" $ ) }} + http: + paths: + {{- if .Values.ingress.extraPaths }} + {{- toYaml .Values.ingress.extraPaths | nindent 10 }} + {{- end }} + - path: {{ .Values.ingress.path }} + {{- if eq "true" (include "common.ingress.supportsPathType" .) }} + pathType: {{ .Values.ingress.pathType }} + {{- end }} + backend: {{- include "common.ingress.backend" (dict "serviceName" (include "common.names.fullname" .) 
"servicePort" .Values.service.portNames.manager "context" $) | nindent 14 }} + {{- end }} + {{- range .Values.ingress.extraHosts }} + - host: {{ include "common.tplvalues.render" ( dict "value" .name "context" $ ) }} + http: + paths: + - path: {{ default "/" .path }} + {{- if eq "true" (include "common.ingress.supportsPathType" $) }} + pathType: {{ default "ImplementationSpecific" .pathType }} + {{- end }} + backend: {{- include "common.ingress.backend" (dict "serviceName" (include "common.names.fullname" $) "servicePort" $.Values.service.portNames.manager "context" $) | nindent 14 }} + {{- end }} + {{- if .Values.ingress.extraRules }} + {{- include "common.tplvalues.render" (dict "value" .Values.ingress.extraRules "context" $) | nindent 4 }} + {{- end }} + {{- if or (and .Values.ingress.tls (or (include "common.ingress.certManagerRequest" ( dict "annotations" .Values.ingress.annotations )) .Values.ingress.selfSigned)) .Values.ingress.extraTls }} + tls: + {{- if and .Values.ingress.tls (or (include "common.ingress.certManagerRequest" ( dict "annotations" .Values.ingress.annotations )) .Values.ingress.selfSigned) }} + - hosts: + - {{ .Values.ingress.hostname | quote }} + {{- if .Values.ingress.existingSecret }} + secretName: {{ .Values.ingress.existingSecret }} + {{- else }} + secretName: {{ printf "%s-tls" .Values.ingress.hostname | trunc 63 | trimSuffix "-" }} + {{- end }} + {{- end }} + {{- if .Values.ingress.extraTls }} + {{- include "common.tplvalues.render" (dict "value" .Values.ingress.extraTls "context" $) | nindent 4 }} + {{- end }} + {{- end }} +{{- end }} diff --git a/charts/modules/rabbitmq/templates/init-configmap.yaml b/charts/modules/rabbitmq/templates/init-configmap.yaml new file mode 100644 index 0000000..d84a353 --- /dev/null +++ b/charts/modules/rabbitmq/templates/init-configmap.yaml @@ -0,0 +1,16 @@ +{{- if .Values.initScripts }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ printf "%s-init-scripts" (include "common.names.fullname" .) }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: +{{- include "common.tplvalues.render" ( dict "value" .Values.initScripts "context" $ ) | nindent 4 }} +{{- end }} diff --git a/charts/modules/rabbitmq/templates/networkpolicy.yaml b/charts/modules/rabbitmq/templates/networkpolicy.yaml new file mode 100644 index 0000000..a5affcd --- /dev/null +++ b/charts/modules/rabbitmq/templates/networkpolicy.yaml @@ -0,0 +1,40 @@ +{{- if .Values.networkPolicy.enabled }} +kind: NetworkPolicy +apiVersion: {{ include "common.capabilities.networkPolicy.apiVersion" . }} +metadata: + name: {{ include "common.names.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + podSelector: + matchLabels: {{- include "common.labels.matchLabels" . 
| nindent 6 }} + ingress: + # Allow inbound connections + - ports: + - port: {{ .Values.service.ports.epmd }} # EPMD + - port: {{ .Values.service.ports.amqp }} + - port: {{ .Values.service.ports.amqpTls }} + - port: {{ .Values.service.ports.dist }} + - port: {{ .Values.service.ports.manager }} + {{- if not .Values.networkPolicy.allowExternal }} + from: + - podSelector: + matchLabels: + {{ printf "%s-client" (include "common.names.fullname" .) }}: "true" + - podSelector: + matchLabels: + {{- include "common.labels.matchLabels" . | nindent 14 }} + {{- if .Values.networkPolicy.additionalRules }} + {{- include "common.tplvalues.render" (dict "value" .Values.networkPolicy.additionalRules "context" $) | nindent 8 }} + {{- end }} + {{- end }} + # Allow prometheus scrapes + - ports: + - port: {{ .Values.service.ports.metrics }} +{{- end }} diff --git a/charts/modules/rabbitmq/templates/pdb.yaml b/charts/modules/rabbitmq/templates/pdb.yaml new file mode 100644 index 0000000..827b49b --- /dev/null +++ b/charts/modules/rabbitmq/templates/pdb.yaml @@ -0,0 +1,23 @@ +{{- if .Values.pdb.create }} +apiVersion: {{ include "common.capabilities.policy.apiVersion" . }} +kind: PodDisruptionBudget +metadata: + name: {{ include "common.names.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + {{- if .Values.pdb.minAvailable }} + minAvailable: {{ .Values.pdb.minAvailable }} + {{- end }} + {{- if .Values.pdb.maxUnavailable }} + maxUnavailable: {{ .Values.pdb.maxUnavailable }} + {{- end }} + selector: + matchLabels: {{ include "common.labels.matchLabels" . | nindent 6 }} +{{- end }} diff --git a/charts/modules/rabbitmq/templates/prometheusrule.yaml b/charts/modules/rabbitmq/templates/prometheusrule.yaml new file mode 100644 index 0000000..fd7208b --- /dev/null +++ b/charts/modules/rabbitmq/templates/prometheusrule.yaml @@ -0,0 +1,23 @@ +{{- if and .Values.metrics.enabled .Values.metrics.prometheusRule.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: {{ include "common.names.fullname" . }} + namespace: {{ default (include "common.names.namespace" .) .Values.metrics.prometheusRule.namespace | quote}} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.metrics.prometheusRule.additionalLabels }} + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.prometheusRule.additionalLabels "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + groups: + {{- with .Values.metrics.prometheusRule.rules }} + - name: {{ template "common.names.name" $ }} + rules: {{- include "common.tplvalues.render" (dict "value" . 
"context" $) | nindent 8 }} + {{- end }} +{{- end }} diff --git a/charts/modules/rabbitmq/templates/role.yaml b/charts/modules/rabbitmq/templates/role.yaml new file mode 100644 index 0000000..ab8be2f --- /dev/null +++ b/charts/modules/rabbitmq/templates/role.yaml @@ -0,0 +1,21 @@ +{{- if .Values.rbac.create }} +kind: Role +apiVersion: {{ include "common.capabilities.rbac.apiVersion" . }} +metadata: + name: {{ printf "%s-endpoint-reader" (include "common.names.fullname" .) }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +rules: + - apiGroups: [""] + resources: ["endpoints"] + verbs: ["get"] + - apiGroups: [""] + resources: ["events"] + verbs: ["create"] +{{- end }} diff --git a/charts/modules/rabbitmq/templates/rolebinding.yaml b/charts/modules/rabbitmq/templates/rolebinding.yaml new file mode 100644 index 0000000..b086f16 --- /dev/null +++ b/charts/modules/rabbitmq/templates/rolebinding.yaml @@ -0,0 +1,21 @@ +{{- if .Values.rbac.create }} +kind: RoleBinding +apiVersion: {{ include "common.capabilities.rbac.apiVersion" . }} +metadata: + name: {{ printf "%s-endpoint-reader" (include "common.names.fullname" .) }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +subjects: + - kind: ServiceAccount + name: {{ template "rabbitmq.serviceAccountName" . }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ printf "%s-endpoint-reader" (include "common.names.fullname" .) }} +{{- end }} diff --git a/charts/modules/rabbitmq/templates/secrets.yaml b/charts/modules/rabbitmq/templates/secrets.yaml new file mode 100644 index 0000000..97d4890 --- /dev/null +++ b/charts/modules/rabbitmq/templates/secrets.yaml @@ -0,0 +1,39 @@ +{{- if or (not .Values.auth.existingErlangSecret) (not .Values.auth.existingPasswordSecret) }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "common.names.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: Opaque +data: + {{- if (not .Values.auth.existingPasswordSecret ) }} + rabbitmq-password: {{ include "common.secrets.passwords.manage" (dict "secret" (include "common.names.fullname" .) 
"key" "rabbitmq-password" "length" 16 "providedValues" (list "auth.password") "context" $) }} + {{ end }} + {{- if (not .Values.auth.existingErlangSecret ) }} + rabbitmq-erlang-cookie: {{ include "common.secrets.passwords.manage" (dict "secret" (include "common.names.fullname" .) "key" "rabbitmq-erlang-cookie" "length" 32 "providedValues" (list "auth.erlangCookie") "context" $) }} + {{ end }} +{{- end }} +{{- range $key, $value := .Values.extraSecrets }} +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ ternary (printf "%s-%s" $.Release.Name $key) $key $.Values.extraSecretsPrependReleaseName }} + namespace: {{ include "common.names.namespace" $ | quote }} + labels: {{- include "common.labels.standard" $ | nindent 4 }} + {{- if $.Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" $.Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if $.Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $.Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: Opaque +stringData: {{- include "common.tplvalues.render" (dict "value" $value "context" $) | nindent 2 }} +{{- end }} diff --git a/charts/modules/rabbitmq/templates/serviceaccount.yaml b/charts/modules/rabbitmq/templates/serviceaccount.yaml new file mode 100644 index 0000000..43c45d1 --- /dev/null +++ b/charts/modules/rabbitmq/templates/serviceaccount.yaml @@ -0,0 +1,24 @@ +{{- if .Values.serviceAccount.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "rabbitmq.serviceAccountName" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if or .Values.serviceAccount.annotations .Values.commonAnnotations }} + annotations: + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.serviceAccount.annotations }} + {{- include "common.tplvalues.render" (dict "value" .Values.serviceAccount.annotations "context" $) | nindent 4 }} + {{- end }} + {{- end }} +automountServiceAccountToken: {{ .Values.serviceAccount.automountServiceAccountToken }} +secrets: + - name: {{ include "common.names.fullname" . }} +{{- end }} + diff --git a/charts/modules/rabbitmq/templates/servicemonitor.yaml b/charts/modules/rabbitmq/templates/servicemonitor.yaml new file mode 100644 index 0000000..a96ae90 --- /dev/null +++ b/charts/modules/rabbitmq/templates/servicemonitor.yaml @@ -0,0 +1,57 @@ +{{- if and .Values.metrics.enabled .Values.metrics.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "common.names.fullname" . }} + namespace: {{ default (include "common.names.namespace" .) .Values.metrics.serviceMonitor.namespace | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + {{- if .Values.metrics.serviceMonitor.labels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.labels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + annotations: + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.annotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.annotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + jobLabel: {{ .Values.metrics.serviceMonitor.jobLabel | quote }} + endpoints: + - port: metrics + {{- if .Values.metrics.serviceMonitor.path }} + path: {{ .Values.metrics.serviceMonitor.path }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.interval }} + interval: {{ .Values.metrics.serviceMonitor.interval }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.metrics.serviceMonitor.scrapeTimeout }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.honorLabels }} + honorLabels: {{ .Values.metrics.serviceMonitor.honorLabels }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.relabelings }} + relabelings: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.relabelings "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.metricRelabelings }} + metricRelabelings: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.metricRelabelings "context" $) | nindent 8 }} + {{- end }} + namespaceSelector: + matchNames: + - {{ include "common.names.namespace" . | quote }} + {{- if .Values.metrics.serviceMonitor.podTargetLabels }} + podTargetLabels: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.podTargetLabels "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.targetLabels }} + targetLabels: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.targetLabels "context" $) | nindent 4 }} + {{- end }} + selector: + matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }} + {{- if .Values.metrics.serviceMonitor.selector }} + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.serviceMonitor.selector "context" $) | nindent 6 }} + {{- end }} +{{- end }} diff --git a/charts/modules/rabbitmq/templates/statefulset.yaml b/charts/modules/rabbitmq/templates/statefulset.yaml new file mode 100644 index 0000000..200749e --- /dev/null +++ b/charts/modules/rabbitmq/templates/statefulset.yaml @@ -0,0 +1,426 @@ +apiVersion: {{ include "common.capabilities.statefulset.apiVersion" . }} +kind: StatefulSet +metadata: + name: {{ include "common.names.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + {{- if .Values.statefulsetLabels }} + {{- include "common.tplvalues.render" (dict "value" .Values.statefulsetLabels "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + serviceName: {{ printf "%s-headless" (include "common.names.fullname" .) }} + podManagementPolicy: {{ .Values.podManagementPolicy }} + replicas: {{ .Values.replicaCount }} + {{- if .Values.updateStrategy }} + updateStrategy: {{- toYaml .Values.updateStrategy | nindent 4 }} + {{- end }} + selector: + matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }} + template: + metadata: + labels: {{- include "common.labels.standard" . | nindent 8 }} + {{- if .Values.podLabels }} + {{- include "common.tplvalues.render" (dict "value" .Values.podLabels "context" $) | nindent 8 }} + {{- end }} + annotations: + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 8 }} + {{- end }} + checksum/config: {{ include (print $.Template.BasePath "/config-secret.yaml") . | sha256sum }} + {{- if or (not .Values.auth.existingErlangSecret) (not .Values.auth.existingPasswordSecret) .Values.extraSecrets }} + checksum/secret: {{ include (print $.Template.BasePath "/secrets.yaml") . | sha256sum }} + {{- end }} + {{- if .Values.podAnnotations }} + {{- include "common.tplvalues.render" (dict "value" .Values.podAnnotations "context" $) | nindent 8 }} + {{- end }} + {{- if and .Values.metrics.enabled .Values.metrics.podAnnotations }} + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.podAnnotations "context" $) | nindent 8 }} + {{- end }} + spec: + {{- include "rabbitmq.imagePullSecrets" . | nindent 6 }} + {{- if .Values.schedulerName }} + schedulerName: {{ .Values.schedulerName | quote }} + {{- end }} + serviceAccountName: {{ template "rabbitmq.serviceAccountName" . }} + {{- if .Values.affinity }} + affinity: {{- include "common.tplvalues.render" (dict "value" .Values.affinity "context" .) | nindent 8 }} + {{- else }} + affinity: + podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.podAffinityPreset "context" $) | nindent 10 }} + podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.podAntiAffinityPreset "context" $) | nindent 10 }} + nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.nodeAffinityPreset.type "key" .Values.nodeAffinityPreset.key "values" .Values.nodeAffinityPreset.values) | nindent 10 }} + {{- end }} + {{- if .Values.hostAliases }} + hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.hostAliases "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.nodeSelector }} + nodeSelector: {{- include "common.tplvalues.render" (dict "value" .Values.nodeSelector "context" .) | nindent 8 }} + {{- end }} + {{- if .Values.tolerations }} + tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.tolerations "context" .) | nindent 8 }} + {{- end }} + {{- if .Values.topologySpreadConstraints }} + topologySpreadConstraints: {{- include "common.tplvalues.render" (dict "value" .Values.topologySpreadConstraints "context" .) 
| nindent 8 }} + {{- end }} + {{- if .Values.priorityClassName }} + priorityClassName: {{ .Values.priorityClassName }} + {{- end }} + {{- if .Values.podSecurityContext.enabled }} + securityContext: {{- omit .Values.podSecurityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + {{- if .Values.dnsPolicy }} + dnsPolicy: {{ .Values.dnsPolicy }} + {{- end }} + {{- if .Values.dnsConfig }} + dnsConfig: {{- include "common.tplvalues.render" (dict "value" .Values.dnsConfig "context" .) | nindent 8 }} + {{- end }} + terminationGracePeriodSeconds: {{ .Values.terminationGracePeriodSeconds }} + initContainers: + {{- if and .Values.volumePermissions.enabled .Values.persistence.enabled }} + - name: volume-permissions + image: {{ include "rabbitmq.volumePermissions.image" . }} + imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} + command: + - /bin/bash + args: + - -ec + - | + mkdir -p "{{ .Values.persistence.mountPath }}" + {{- if eq ( toString ( .Values.volumePermissions.containerSecurityContext.runAsUser )) "auto" }} + chown `id -u`:`id -G | cut -d " " -f2` {{ .Values.persistence.mountPath }} + {{- else }} + chown "{{ .Values.containerSecurityContext.runAsUser }}:{{ .Values.podSecurityContext.fsGroup }}" "{{ .Values.persistence.mountPath }}" + {{- end }} + find "{{ .Values.persistence.mountPath }}" -mindepth 1 -maxdepth 1 -not -name ".snapshot" -not -name "lost+found" | \ + {{- if eq ( toString ( .Values.volumePermissions.containerSecurityContext.runAsUser )) "auto" }} + xargs -r chown -R `id -u`:`id -G | cut -d " " -f2` + {{- else }} + xargs -r chown -R "{{ .Values.containerSecurityContext.runAsUser }}:{{ .Values.podSecurityContext.fsGroup }}" + {{- end }} + {{- if eq ( toString ( .Values.volumePermissions.containerSecurityContext.runAsUser )) "auto" }} + securityContext: {{- omit .Values.volumePermissions.containerSecurityContext "runAsUser" | toYaml | nindent 12 }} + {{- else }} + securityContext: {{- .Values.volumePermissions.containerSecurityContext | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.volumePermissions.resources }} + resources: {{- toYaml .Values.volumePermissions.resources | nindent 12 }} + {{- end }} + volumeMounts: + - name: data + mountPath: {{ .Values.persistence.mountPath }} + {{- if .Values.persistence.subPath }} + subPath: {{ .Values.persistence.subPath }} + {{- end }} + {{- end }} + {{- if .Values.initContainers }} + {{- include "common.tplvalues.render" (dict "value" .Values.initContainers "context" $) | nindent 8 }} + {{- end }} + containers: + - name: rabbitmq + image: {{ template "rabbitmq.image" . 
}} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if .Values.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + {{- else if .Values.command }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.command "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- else if .Values.args }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.args "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.lifecycleHooks }} + lifecycle: {{- include "common.tplvalues.render" (dict "value" .Values.lifecycleHooks "context" $) | nindent 12 }} + {{- else }} + lifecycle: + preStop: + exec: + command: + - /bin/bash + - -ec + - | + if [[ -f /opt/bitnami/scripts/rabbitmq/nodeshutdown.sh ]]; then + /opt/bitnami/scripts/rabbitmq/nodeshutdown.sh -t {{ .Values.terminationGracePeriodSeconds | quote }} -d {{ ternary "true" "false" .Values.image.debug | quote }} + else + rabbitmqctl stop_app + fi + {{- end }} + env: + - name: BITNAMI_DEBUG + value: {{ ternary "true" "false" (or .Values.image.debug .Values.diagnosticMode.enabled) | quote }} + - name: MY_POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: MY_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: MY_POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: K8S_SERVICE_NAME + value: {{ printf "%s-headless" (include "common.names.fullname" .) }} + - name: K8S_ADDRESS_TYPE + value: {{ .Values.clustering.addressType }} + - name: RABBITMQ_FORCE_BOOT + value: {{ ternary "yes" "no" .Values.clustering.forceBoot | quote }} + {{- if (eq "hostname" .Values.clustering.addressType) }} + - name: RABBITMQ_NODE_NAME + value: "rabbit@$(MY_POD_NAME).$(K8S_SERVICE_NAME).$(MY_POD_NAMESPACE).svc.{{ .Values.clusterDomain }}" + - name: K8S_HOSTNAME_SUFFIX + value: ".$(K8S_SERVICE_NAME).$(MY_POD_NAMESPACE).svc.{{ .Values.clusterDomain }}" + {{- else }} + - name: RABBITMQ_NODE_NAME + value: "rabbit@$(MY_POD_NAME)" + {{- end }} + - name: RABBITMQ_MNESIA_DIR + value: "{{ .Values.persistence.mountPath }}/$(RABBITMQ_NODE_NAME)" + - name: RABBITMQ_LDAP_ENABLE + value: {{ ternary "yes" "no" .Values.ldap.enabled | quote }} + {{- if .Values.ldap.enabled }} + - name: RABBITMQ_LDAP_TLS + value: {{ ternary "yes" "no" .Values.ldap.tls.enabled | quote }} + - name: RABBITMQ_LDAP_SERVERS + value: {{ .Values.ldap.servers | join "," | quote }} + - name: RABBITMQ_LDAP_SERVERS_PORT + value: {{ .Values.ldap.port | quote }} + - name: RABBITMQ_LDAP_USER_DN_PATTERN + value: {{ .Values.ldap.user_dn_pattern }} + {{- end }} + {{- if .Values.logs }} + - name: RABBITMQ_LOGS + value: {{ .Values.logs | quote }} + {{- end }} + - name: RABBITMQ_ULIMIT_NOFILES + value: {{ .Values.ulimitNofiles | quote }} + {{- if and .Values.maxAvailableSchedulers }} + - name: RABBITMQ_SERVER_ADDITIONAL_ERL_ARGS + value: {{ printf "+S %s:%s" (toString .Values.maxAvailableSchedulers) (toString .Values.onlineSchedulers) -}} + {{- end }} + - name: RABBITMQ_USE_LONGNAME + value: "true" + - name: RABBITMQ_ERL_COOKIE + valueFrom: + secretKeyRef: + name: {{ template "rabbitmq.secretErlangName" . 
}} + key: rabbitmq-erlang-cookie + {{- if and .Values.clustering.rebalance (gt (.Values.replicaCount | int) 1) }} + - name: RABBITMQ_CLUSTER_REBALANCE + value: "true" + {{- end }} + - name: RABBITMQ_LOAD_DEFINITIONS + value: {{ ternary "yes" "no" .Values.loadDefinition.enabled | quote }} + - name: RABBITMQ_DEFINITIONS_FILE + value: {{ .Values.loadDefinition.file | quote }} + - name: RABBITMQ_SECURE_PASSWORD + value: {{ ternary "yes" "no" (or .Values.auth.securePassword (not .Values.auth.password)) | quote }} + - name: RABBITMQ_USERNAME + value: {{ .Values.auth.username | quote }} + - name: RABBITMQ_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "rabbitmq.secretPasswordName" . }} + key: rabbitmq-password + - name: RABBITMQ_PLUGINS + value: {{ include "rabbitmq.plugins" . | quote }} + {{- if .Values.communityPlugins }} + - name: RABBITMQ_COMMUNITY_PLUGINS + value: {{ .Values.communityPlugins | quote }} + {{- end }} + {{- if .Values.extraEnvVars }} + {{- include "common.tplvalues.render" (dict "value" .Values.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + envFrom: + {{- if .Values.extraEnvVarsCM }} + - configMapRef: + name: {{ include "common.tplvalues.render" (dict "value" .Values.extraEnvVarsCM "context" $) }} + {{- end }} + {{- if .Values.extraEnvVarsSecret }} + - secretRef: + name: {{ include "common.tplvalues.render" (dict "value" .Values.extraEnvVarsSecret "context" $) }} + {{- end }} + ports: + - name: amqp + containerPort: {{ .Values.containerPorts.amqp }} + - name: dist + containerPort: {{ .Values.containerPorts.dist }} + - name: stats + containerPort: {{ .Values.containerPorts.manager }} + - name: epmd + containerPort: {{ .Values.containerPorts.epmd }} + {{- if .Values.metrics.enabled }} + - name: metrics + containerPort: {{ .Values.containerPorts.metrics }} + {{- end }} + {{- if .Values.auth.tls.enabled }} + - name: amqp-ssl + containerPort: {{ .Values.containerPorts.amqpTls }} + {{- end }} + {{- if .Values.extraContainerPorts }} + {{- include "common.tplvalues.render" (dict "value" .Values.extraContainerPorts "context" $) | nindent 12 }} + {{- end }} + {{- if not .Values.diagnosticMode.enabled }} + {{- if .Values.customLivenessProbe }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.customLivenessProbe "context" $) | nindent 12 }} + {{- else if .Values.livenessProbe.enabled }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.livenessProbe "enabled") "context" $) | nindent 12 }} + exec: + command: + - /bin/bash + - -ec + - rabbitmq-diagnostics -q ping + {{- end }} + {{- if .Values.customReadinessProbe }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.customReadinessProbe "context" $) | nindent 12 }} + {{- else if .Values.readinessProbe.enabled }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.readinessProbe "enabled") "context" $) | nindent 12 }} + exec: + command: + - /bin/bash + - -ec + - rabbitmq-diagnostics -q check_running && rabbitmq-diagnostics -q check_local_alarms + {{- end }} + {{- if .Values.customStartupProbe }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.customStartupProbe "context" $) | nindent 12 }} + {{- else if .Values.startupProbe.enabled }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.startupProbe "enabled") "context" $) | nindent 12 }} + tcpSocket: + port: {{ternary "amqp-ssl" "amqp" .Values.auth.tls.enabled }} + {{- end }} + {{- end }} + {{- if 
.Values.resources }} + resources: {{- toYaml .Values.resources | nindent 12 }} + {{- end }} + volumeMounts: + - name: configuration + mountPath: /bitnami/rabbitmq/conf + - name: data + mountPath: {{ .Values.persistence.mountPath }} + {{- if .Values.persistence.subPath }} + subPath: {{ .Values.persistence.subPath }} + {{- end }} + {{- if .Values.auth.tls.enabled }} + - name: certs + mountPath: /opt/bitnami/rabbitmq/certs + {{- end }} + {{- if and .Values.ldap.tls.enabled .Values.ldap.tls.certificatesSecret }} + - name: ldap-certs + mountPath: {{ .Values.ldap.tls.certificatesMountPath }} + {{- end }} + {{- if .Values.loadDefinition.enabled }} + - name: load-definition-volume + mountPath: /app + readOnly: true + {{- end }} + {{- if .Values.extraVolumeMounts }} + {{- include "common.tplvalues.render" (dict "value" .Values.extraVolumeMounts "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.initScripts }} + - name: custom-init-scripts + mountPath: /docker-entrypoint-initdb.d/init-scripts + {{- end }} + {{- if .Values.initScriptsCM }} + - name: custom-init-scripts-cm + mountPath: /docker-entrypoint-initdb.d/init-scripts-cm + {{- end }} + {{- if .Values.initScriptsSecret }} + - name: custom-init-scripts-secret + mountPath: /docker-entrypoint-initdb.d/init-scripts-secret + {{- end }} + {{- if .Values.sidecars }} + {{- include "common.tplvalues.render" (dict "value" .Values.sidecars "context" $) | nindent 8 }} + {{- end }} + volumes: + {{- if .Values.auth.tls.enabled }} + - name: certs + secret: + secretName: {{ template "rabbitmq.tlsSecretName" . }} + items: + - key: {{ ternary "tls.crt" "ca.crt" .Values.auth.tls.existingSecretFullChain }} + path: ca_certificate.pem + - key: tls.crt + path: server_certificate.pem + - key: tls.key + path: server_key.pem + {{- end }} + {{- if and .Values.ldap.tls.enabled .Values.ldap.tls.certificatesSecret }} + - name: ldap-certs + secret: + secretName: {{ .Values.ldap.tls.certificatesSecret }} + {{- end }} + - name: configuration + secret: + secretName: {{ printf "%s-config" (include "common.names.fullname" .) }} + items: + - key: rabbitmq.conf + path: rabbitmq.conf + {{- if .Values.advancedConfiguration }} + - key: advanced.config + path: advanced.config + {{- end }} + {{- if .Values.loadDefinition.enabled }} + - name: load-definition-volume + secret: + secretName: {{ tpl .Values.loadDefinition.existingSecret . | quote }} + {{- end }} + {{- if .Values.initScripts }} + - name: custom-init-scripts + configMap: + name: {{ template "rabbitmq.initScripts" . }} + {{- end }} + {{- if .Values.initScriptsCM }} + - name: custom-init-scripts-cm + configMap: + name: {{ tpl .Values.initScriptsCM . | quote }} + {{- end }} + {{- if .Values.initScriptsSecret }} + - name: custom-init-scripts-secret + secret: + secretName: {{ tpl .Values.initScriptsSecret . | quote }} + defaultMode: 0755 + {{- end }} + {{- if .Values.extraVolumes }} + {{- include "common.tplvalues.render" (dict "value" .Values.extraVolumes "context" $) | nindent 8 }} + {{- end }} + {{- if not .Values.persistence.enabled }} + - name: data + emptyDir: {} + {{- else if .Values.persistence.existingClaim }} + - name: data + persistentVolumeClaim: + {{- with .Values.persistence.existingClaim }} + claimName: {{ tpl . $ }} + {{- end }} + {{- else }} + volumeClaimTemplates: + - metadata: + name: data + labels: {{- include "common.labels.matchLabels" . 
| nindent 10 }} + {{- if .Values.persistence.annotations }} + annotations: + {{- include "common.tplvalues.render" ( dict "value" .Values.persistence.annotations "context" $) | nindent 10 }} + {{- end }} + spec: + accessModes: + {{- range .Values.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.persistence.size | quote }} + {{ include "common.storage.class" (dict "persistence" .Values.persistence "global" .Values.global) }} + {{- if .Values.persistence.selector }} + selector: {{- include "common.tplvalues.render" (dict "value" .Values.persistence.selector "context" $) | nindent 10 }} + {{- end -}} + {{- end }} diff --git a/charts/modules/rabbitmq/templates/svc-headless.yaml b/charts/modules/rabbitmq/templates/svc-headless.yaml new file mode 100644 index 0000000..430befe --- /dev/null +++ b/charts/modules/rabbitmq/templates/svc-headless.yaml @@ -0,0 +1,44 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "common.names.fullname" . }}-headless + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if or .Values.service.annotationsHeadless .Values.commonAnnotations }} + annotations: + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" (dict "value" .Values.commonAnnotations "context" $) | nindent 4 }} + {{- end -}} + {{- if .Values.service.annotationsHeadless }} + {{- include "common.tplvalues.render" (dict "value" .Values.service.annotationsHeadless "context" $) | nindent 4 }} + {{- end -}} + {{- end }} +spec: + clusterIP: None + ports: + - name: {{ .Values.service.portNames.epmd }} + port: {{ .Values.service.ports.epmd }} + targetPort: epmd + {{- if or (.Values.service.portEnabled) (not .Values.auth.tls.enabled) }} + - name: {{ .Values.service.portNames.amqp }} + port: {{ .Values.service.ports.amqp }} + targetPort: amqp + {{- end }} + {{- if .Values.auth.tls.enabled }} + - name: {{ .Values.service.portNames.amqpTls }} + port: {{ .Values.service.ports.amqpTls }} + targetPort: amqp-tls + {{- end }} + - name: {{ .Values.service.portNames.dist }} + port: {{ .Values.service.ports.dist }} + targetPort: dist + {{- if .Values.service.managerPortEnabled }} + - name: {{ .Values.service.portNames.manager }} + port: {{ .Values.service.ports.manager }} + targetPort: stats + {{- end }} + selector: {{ include "common.labels.matchLabels" . | nindent 4 }} + publishNotReadyAddresses: true diff --git a/charts/modules/rabbitmq/templates/svc.yaml b/charts/modules/rabbitmq/templates/svc.yaml new file mode 100644 index 0000000..ba6e796 --- /dev/null +++ b/charts/modules/rabbitmq/templates/svc.yaml @@ -0,0 +1,111 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "common.names.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.service.labels }} + {{- include "common.tplvalues.render" (dict "value" .Values.service.labels "context" $) | nindent 4 }} + {{- end }} + {{- if or (.Values.service.annotations) (.Values.commonAnnotations) }} + annotations: + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" (dict "value" .Values.commonAnnotations "context" $) | nindent 4 }} + {{- end -}} + {{- if .Values.service.annotations }} + {{- include "common.tplvalues.render" (dict "value" .Values.service.annotations "context" $) | nindent 4 }} + {{- end -}} + {{- end }} +spec: + type: {{ .Values.service.type }} + {{- if not (empty .Values.service.clusterIP) }} + clusterIP: {{ .Values.service.clusterIP }} + {{- end }} + {{- if eq .Values.service.type "LoadBalancer" }} + {{- if not (empty .Values.service.loadBalancerIP) }} + loadBalancerIP: {{ .Values.service.loadBalancerIP }} + {{- end }} + {{- if .Values.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: {{- toYaml .Values.service.loadBalancerSourceRanges | nindent 4 }} + {{- end }} + {{- end }} + {{- if or (eq .Values.service.type "LoadBalancer") (eq .Values.service.type "NodePort") }} + externalTrafficPolicy: {{ .Values.service.externalTrafficPolicy | quote }} + {{- end }} + {{- if .Values.service.sessionAffinity }} + sessionAffinity: {{ .Values.service.sessionAffinity }} + {{- end }} + {{- if .Values.service.sessionAffinityConfig }} + sessionAffinityConfig: {{- include "common.tplvalues.render" (dict "value" .Values.service.sessionAffinityConfig "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.service.externalIPs }} + externalIPs: {{- toYaml .Values.service.externalIPs | nindent 4 }} + {{- end }} + ports: + {{- if or (.Values.service.portEnabled) (not .Values.auth.tls.enabled) }} + - name: {{ .Values.service.portNames.amqp }} + port: {{ .Values.service.ports.amqp }} + targetPort: amqp + {{- if (eq .Values.service.type "ClusterIP") }} + nodePort: null + {{- else if and (or (eq .Values.service.type "NodePort") (eq .Values.service.type "LoadBalancer")) (not (empty .Values.service.nodePorts.amqp)) }} + nodePort: {{ .Values.service.nodePorts.amqp }} + {{- end }} + {{- end }} + {{- if .Values.auth.tls.enabled }} + - name: {{ .Values.service.portNames.amqpTls }} + port: {{ .Values.service.ports.amqpTls }} + targetPort: amqp-ssl + {{- if (eq .Values.service.type "ClusterIP") }} + nodePort: null + {{- else if and (or (eq .Values.service.type "NodePort") (eq .Values.service.type "LoadBalancer")) (not (empty .Values.service.nodePorts.amqpTls)) }} + nodePort: {{ .Values.service.nodePorts.amqpTls }} + {{- end }} + {{- end }} + {{- if .Values.service.epmdPortEnabled }} + - name: {{ .Values.service.portNames.epmd }} + port: {{ .Values.service.ports.epmd }} + targetPort: epmd + {{- if (eq .Values.service.type "ClusterIP") }} + nodePort: null + {{- else if and (or (eq .Values.service.type "NodePort") (eq .Values.service.type "LoadBalancer")) (not (empty .Values.service.nodePorts.epmd)) }} + nodePort: {{ .Values.service.nodePorts.epmd }} + {{- end }} + {{- end }} + {{- if .Values.service.distPortEnabled }} + - name: {{ .Values.service.portNames.dist }} + port: {{ .Values.service.ports.dist }} + targetPort: dist + {{- if eq .Values.service.type "ClusterIP" }} + nodePort: null + {{- else if and (or (eq .Values.service.type "NodePort") (eq .Values.service.type 
"LoadBalancer")) (not (empty .Values.service.nodePorts.dist)) }} + nodePort: {{ .Values.service.nodePorts.dist }} + {{- end }} + {{- end }} + {{- if .Values.service.managerPortEnabled }} + - name: {{ .Values.service.portNames.manager }} + port: {{ .Values.service.ports.manager }} + targetPort: stats + {{- if eq .Values.service.type "ClusterIP" }} + nodePort: null + {{- else if and (or (eq .Values.service.type "NodePort") (eq .Values.service.type "LoadBalancer")) (not (empty .Values.service.nodePorts.manager)) }} + nodePort: {{ .Values.service.nodePorts.manager }} + {{- end }} + {{- end }} + {{- if .Values.metrics.enabled }} + - name: {{ .Values.service.portNames.metrics }} + port: {{ .Values.service.ports.metrics }} + targetPort: metrics + {{- if eq .Values.service.type "ClusterIP" }} + nodePort: null + {{- else if and (or (eq .Values.service.type "NodePort") (eq .Values.service.type "LoadBalancer")) (not (empty .Values.service.nodePorts.metrics)) }} + nodePort: {{ .Values.service.nodePorts.metrics }} + {{- end }} + {{- end }} + {{- if .Values.service.extraPorts }} + {{- include "common.tplvalues.render" (dict "value" .Values.service.extraPorts "context" $) | nindent 4 }} + {{- end }} + selector: {{ include "common.labels.matchLabels" . | nindent 4 }} diff --git a/charts/modules/rabbitmq/templates/tls-secrets.yaml b/charts/modules/rabbitmq/templates/tls-secrets.yaml new file mode 100644 index 0000000..20bb5c6 --- /dev/null +++ b/charts/modules/rabbitmq/templates/tls-secrets.yaml @@ -0,0 +1,81 @@ +{{- if .Values.ingress.enabled }} +{{- if .Values.ingress.secrets }} +{{- range .Values.ingress.secrets }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ .name }} + namespace: {{ include "common.names.namespace" $ | quote }} + labels: {{- include "common.labels.standard" $ | nindent 4 }} + {{- if $.Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if $.Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: kubernetes.io/tls +data: + tls.crt: {{ .certificate | b64enc }} + tls.key: {{ .key | b64enc }} +--- +{{- end }} +{{- end }} +{{- if and .Values.ingress.tls .Values.ingress.selfSigned }} +{{- $ca := genCA "rabbitmq-ca" 365 }} +{{- $cert := genSignedCert .Values.ingress.hostname nil (list .Values.ingress.hostname) 365 $ca }} +apiVersion: v1 +kind: Secret +metadata: + {{- if .Values.ingress.existingSecret }} + name: {{ .Values.ingress.existingSecret }} + {{- else }} + name: {{ printf "%s-tls" .Values.ingress.hostname | trunc 63 | trimSuffix "-" }} + {{- end }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: kubernetes.io/tls +data: + tls.crt: {{ $cert.Cert | b64enc | quote }} + tls.key: {{ $cert.Key | b64enc | quote }} + ca.crt: {{ $ca.Cert | b64enc | quote }} +{{- end }} +{{- end }} +{{- if (include "rabbitmq.createTlsSecret" . ) }} +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "common.names.fullname" . }}-certs + namespace: {{ include "common.names.namespace" . 
| quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: kubernetes.io/tls +data: + {{- if or (not .Values.auth.tls.autoGenerated ) (and .Values.auth.tls.caCertificate .Values.auth.tls.serverCertificate .Values.auth.tls.serverKey) }} + ca.crt: {{ required "A valid .Values.auth.tls.caCertificate entry required!" .Values.auth.tls.caCertificate | b64enc | quote }} + tls.crt: {{ required "A valid .Values.auth.tls.serverCertificate entry required!" .Values.auth.tls.serverCertificate| b64enc | quote }} + tls.key: {{ required "A valid .Values.auth.tls.serverKey entry required!" .Values.auth.tls.serverKey | b64enc | quote }} + {{- else }} + {{- $ca := genCA "rabbitmq-internal-ca" 365 }} + {{- $fullname := include "common.names.fullname" . }} + {{- $releaseNamespace := .Release.Namespace }} + {{- $clusterDomain := .Values.clusterDomain }} + {{- $serviceName := include "common.names.fullname" . }} + {{- $altNames := list (printf "*.%s.%s.svc.%s" $serviceName $releaseNamespace $clusterDomain) (printf "%s.%s.svc.%s" $serviceName $releaseNamespace $clusterDomain) $fullname }} + {{- $crt := genSignedCert $fullname nil $altNames 365 $ca }} + ca.crt: {{ $ca.Cert | b64enc | quote }} + tls.crt: {{ $crt.Cert | b64enc | quote }} + tls.key: {{ $crt.Key | b64enc | quote }} + {{- end }} +{{- end }} diff --git a/charts/modules/rabbitmq/templates/validation.yaml b/charts/modules/rabbitmq/templates/validation.yaml new file mode 100644 index 0000000..f72ef7f --- /dev/null +++ b/charts/modules/rabbitmq/templates/validation.yaml @@ -0,0 +1,2 @@ +{{- include "rabbitmq.validateValues" . 
}} + diff --git a/charts/modules/rabbitmq/values.schema.json b/charts/modules/rabbitmq/values.schema.json new file mode 100644 index 0000000..8ef33ef --- /dev/null +++ b/charts/modules/rabbitmq/values.schema.json @@ -0,0 +1,100 @@ +{ + "$schema": "http://json-schema.org/schema#", + "type": "object", + "properties": { + "auth": { + "type": "object", + "properties": { + "username": { + "type": "string", + "title": "RabbitMQ user", + "form": true + }, + "password": { + "type": "string", + "title": "RabbitMQ password", + "form": true, + "description": "Defaults to a random 10-character alphanumeric string if not set" + } + } + }, + "extraConfiguration": { + "type": "string", + "title": "Extra RabbitMQ Configuration", + "form": true, + "render": "textArea", + "description": "Extra configuration to be appended to RabbitMQ Configuration" + }, + "replicaCount": { + "type": "integer", + "form": true, + "title": "Number of replicas", + "description": "Number of replicas to deploy" + }, + "persistence": { + "type": "object", + "title": "Persistence configuration", + "form": true, + "properties": { + "enabled": { + "type": "boolean", + "form": true, + "title": "Enable persistence", + "description": "Enable persistence using Persistent Volume Claims" + }, + "size": { + "type": "string", + "title": "Persistent Volume Size", + "form": true, + "render": "slider", + "sliderMin": 1, + "sliderMax": 100, + "sliderUnit": "Gi", + "hidden": { + "value": false, + "path": "persistence/enabled" + } + } + } + }, + "volumePermissions": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean", + "form": true, + "title": "Enable Init Containers", + "description": "Use an init container to set required folder permissions on the data volume before mounting it in the final destination" + } + } + }, + "metrics": { + "type": "object", + "form": true, + "title": "Prometheus metrics details", + "properties": { + "enabled": { + "type": "boolean", + "title": "Enable Prometheus metrics for RabbitMQ", + "description": "Install Prometheus plugin in the RabbitMQ container", + "form": true + }, + "serviceMonitor": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean", + "title": "Create Prometheus Operator ServiceMonitor", + "description": "Create a ServiceMonitor to track metrics using Prometheus Operator", + "form": true, + "hidden": { + "value": false, + "path": "metrics/enabled" + } + } + } + } + } + } + } +} diff --git a/charts/modules/rabbitmq/values.yaml b/charts/modules/rabbitmq/values.yaml new file mode 100644 index 0000000..d210b73 --- /dev/null +++ b/charts/modules/rabbitmq/values.yaml @@ -0,0 +1,1313 @@ +## @section Global parameters +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry, imagePullSecrets and storageClass +## + +## @param global.imageRegistry Global Docker image registry +## @param global.imagePullSecrets Global Docker registry secret names as an array +## @param global.storageClass Global StorageClass for Persistent Volume(s) +## +global: + imageRegistry: "" + ## E.g. 
+ ## imagePullSecrets: + ## - myRegistryKeySecretName + ## + imagePullSecrets: [] + storageClass: "" + +## @section RabbitMQ Image parameters +## Bitnami RabbitMQ image version +## ref: https://hub.docker.com/r/bitnami/rabbitmq/tags/ +## @param image.registry RabbitMQ image registry +## @param image.repository RabbitMQ image repository +## @param image.tag RabbitMQ image tag (immutable tags are recommended) +## @param image.digest RabbitMQ image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag +## @param image.pullPolicy RabbitMQ image pull policy +## @param image.pullSecrets Specify docker-registry secret names as an array +## @param image.debug Set to true if you would like to see extra information on logs +## +image: + registry: docker.io + repository: bitnami/rabbitmq + tag: 3.11.4-debian-11-r0 + digest: "" + ## set to true if you would like to see extra information on logs + ## It turns BASH and/or NAMI debugging in the image + ## + debug: false + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: https://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## Example: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + +## @section Common parameters +## + +## @param nameOverride String to partially override rabbitmq.fullname template (will maintain the release name) +## +nameOverride: "" +## @param fullnameOverride String to fully override rabbitmq.fullname template +## +fullnameOverride: "" +## @param namespaceOverride String to fully override common.names.namespace +## +namespaceOverride: "" +## @param kubeVersion Force target Kubernetes version (using Helm capabilities if not set) +## +kubeVersion: "" +## @param clusterDomain Kubernetes Cluster Domain +## +clusterDomain: cluster.local +## @param extraDeploy Array of extra objects to deploy with the release +## +extraDeploy: [] +## @param commonAnnotations Annotations to add to all deployed objects +## +commonAnnotations: {} +## @param commonLabels Labels to add to all deployed objects +## +commonLabels: {} +## Enable diagnostic mode in the deployment +## +diagnosticMode: + ## @param diagnosticMode.enabled Enable diagnostic mode (all probes will be disabled and the command will be overridden) + ## + enabled: false + ## @param diagnosticMode.command Command to override all containers in the deployment + ## + command: + - sleep + ## @param diagnosticMode.args Args to override all containers in the deployment + ## + args: + - infinity + +## @param hostAliases Deployment pod host aliases +## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ +## +hostAliases: [] +## @param dnsPolicy DNS Policy for pod +## ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/ +## E.g. +## dnsPolicy: ClusterFirst +## +dnsPolicy: "" +## @param dnsConfig DNS Configuration pod +## ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/ +## E.g. 
+## dnsConfig: +## options: +## - name: ndots +## value: "4" +## +dnsConfig: {} +## RabbitMQ Authentication parameters +## +auth: + ## @param auth.username RabbitMQ application username + ## ref: https://github.com/bitnami/containers/tree/main/bitnami/rabbitmq#environment-variables + ## + username: user + ## @param auth.password RabbitMQ application password + ## ref: https://github.com/bitnami/containers/tree/main/bitnami/rabbitmq#environment-variables + ## + password: "" + ## @param auth.securePassword Whether to set the RabbitMQ password securely. This is incompatible with loading external RabbitMQ definitions and 'true' when not setting the auth.password parameter. + ## ref: https://github.com/bitnami/containers/tree/main/bitnami/rabbitmq#environment-variables + ## + securePassword: true + ## @param auth.existingPasswordSecret Existing secret with RabbitMQ credentials (must contain a value for `rabbitmq-password` key) + ## e.g: + ## existingPasswordSecret: name-of-existing-secret + ## + existingPasswordSecret: "" + ## @param auth.erlangCookie Erlang cookie to determine whether different nodes are allowed to communicate with each other + ## ref: https://github.com/bitnami/containers/tree/main/bitnami/rabbitmq#environment-variables + ## + erlangCookie: "" + ## @param auth.existingErlangSecret Existing secret with RabbitMQ Erlang cookie (must contain a value for `rabbitmq-erlang-cookie` key) + ## e.g: + ## existingErlangSecret: name-of-existing-secret + ## + existingErlangSecret: "" + + ## Enable encryption to rabbitmq + ## ref: https://www.rabbitmq.com/ssl.html + ## @param auth.tls.enabled Enable TLS support on RabbitMQ + ## @param auth.tls.autoGenerated Generate automatically self-signed TLS certificates + ## @param auth.tls.failIfNoPeerCert When set to true, TLS connection will be rejected if client fails to provide a certificate + ## @param auth.tls.sslOptionsVerify Should [peer verification](https://www.rabbitmq.com/ssl.html#peer-verification) be enabled? + ## @param auth.tls.caCertificate Certificate Authority (CA) bundle content + ## @param auth.tls.serverCertificate Server certificate content + ## @param auth.tls.serverKey Server private key content + ## @param auth.tls.existingSecret Existing secret with certificate content to RabbitMQ credentials + ## @param auth.tls.existingSecretFullChain Whether or not the existing secret contains the full chain in the certificate (`tls.crt`). Will be used in place of `ca.cert` if `true`. + ## + tls: + enabled: false + autoGenerated: false + failIfNoPeerCert: true + sslOptionsVerify: verify_peer + caCertificate: |- + serverCertificate: |- + serverKey: |- + existingSecret: "" + existingSecretFullChain: false + +## @param logs Path of the RabbitMQ server's Erlang log file. Value for the `RABBITMQ_LOGS` environment variable +## ref: https://www.rabbitmq.com/logging.html#log-file-location +## +logs: "-" +## @param ulimitNofiles RabbitMQ Max File Descriptors +## ref: https://github.com/bitnami/containers/tree/main/bitnami/rabbitmq#environment-variables +## ref: https://www.rabbitmq.com/install-debian.html#kernel-resource-limits +## +ulimitNofiles: "65536" +## RabbitMQ maximum available scheduler threads and online scheduler threads. By default it will create a thread per CPU detected, with the following parameters you can tune it manually. 
+## ref: https://hamidreza-s.github.io/erlang/scheduling/real-time/preemptive/migration/2016/02/09/erlang-scheduler-details.html#scheduler-threads +## ref: https://github.com/bitnami/charts/issues/2189 +## @param maxAvailableSchedulers RabbitMQ maximum available scheduler threads +## @param onlineSchedulers RabbitMQ online scheduler threads +## +maxAvailableSchedulers: "" +onlineSchedulers: "" + +## The memory threshold under which RabbitMQ will stop reading from client network sockets, in order to avoid being killed by the OS +## ref: https://www.rabbitmq.com/alarms.html +## ref: https://www.rabbitmq.com/memory.html#threshold +## +memoryHighWatermark: + ## @param memoryHighWatermark.enabled Enable configuring Memory high watermark on RabbitMQ + ## + enabled: false + ## @param memoryHighWatermark.type Memory high watermark type. Either `absolute` or `relative` + ## + type: "relative" + ## Memory high watermark value. + ## @param memoryHighWatermark.value Memory high watermark value + ## The default value of 0.4 stands for 40% of available RAM + ## Note: the memory relative limit is applied to the resource.limits.memory to calculate the memory threshold + ## You can also use an absolute value, e.g.: 256MB + ## + value: 0.4 + +## @param plugins List of default plugins to enable (should only be altered to remove defaults; for additional plugins use `extraPlugins`) +## +plugins: "rabbitmq_management rabbitmq_peer_discovery_k8s" +## @param communityPlugins List of Community plugins (URLs) to be downloaded during container initialization +## Combine it with extraPlugins to also enable them. +## +communityPlugins: "" +## @param extraPlugins Extra plugins to enable (single string containing a space-separated list) +## Use this instead of `plugins` to add new plugins +## +extraPlugins: "rabbitmq_auth_backend_ldap" + +## Clustering settings +## +clustering: + ## @param clustering.enabled Enable RabbitMQ clustering + ## + enabled: true + ## @param clustering.addressType Switch clustering mode. Either `ip` or `hostname` + ## + addressType: hostname + ## @param clustering.rebalance Rebalance master for queues in cluster when new replica is created + ## ref: https://www.rabbitmq.com/rabbitmq-queues.8.html#rebalance + ## + rebalance: false + ## @param clustering.forceBoot Force boot of an unexpectedly shut down cluster (in an unexpected order). + ## forceBoot executes 'rabbitmqctl force_boot' to force boot cluster shut down unexpectedly in an unknown order + ## ref: https://www.rabbitmq.com/rabbitmqctl.8.html#force_boot + ## + forceBoot: false + ## @param clustering.partitionHandling Switch Partition Handling Strategy. 
Either `autoheal` or `pause-minority` or `pause-if-all-down` or `ignore` + ## ref: https://www.rabbitmq.com/partitions.html#automatic-handling + ## + partitionHandling: autoheal + +## Loading a RabbitMQ definitions file to configure RabbitMQ +## +loadDefinition: + ## @param loadDefinition.enabled Enable loading a RabbitMQ definitions file to configure RabbitMQ + ## + enabled: false + ## @param loadDefinition.file Name of the definitions file + ## + file: "/app/load_definition.json" + ## @param loadDefinition.existingSecret Existing secret with the load definitions file + ## Can be templated if needed, e.g: + ## existingSecret: "{{ .Release.Name }}-load-definition" + ## + existingSecret: "" + +## @param command Override default container command (useful when using custom images) +## +command: [] +## @param args Override default container args (useful when using custom images) +## +args: [] +## @param lifecycleHooks Overwrite lifecycle for the RabbitMQ container(s) to automate configuration before or after startup +## +lifecycleHooks: {} +## @param terminationGracePeriodSeconds Default duration in seconds k8s waits for the container to exit before sending the kill signal. +## Any time in excess of 10 seconds will be spent waiting for any synchronization necessary for the cluster not to lose data. +## +terminationGracePeriodSeconds: 120 +## @param extraEnvVars Extra environment variables to add to RabbitMQ pods +## E.g: +## extraEnvVars: +## - name: FOO +## value: BAR +## +extraEnvVars: [] +## @param extraEnvVarsCM Name of existing ConfigMap containing extra environment variables +## +extraEnvVarsCM: "" +## @param extraEnvVarsSecret Name of existing Secret containing extra environment variables (in case of sensitive data) +## +extraEnvVarsSecret: "" + +## Container Ports +## @param containerPorts.amqp +## @param containerPorts.amqpTls +## @param containerPorts.dist +## @param containerPorts.manager +## @param containerPorts.epmd +## @param containerPorts.metrics +## +containerPorts: + amqp: 5672 + amqpTls: 5671 + dist: 25672 + manager: 15672 + epmd: 4369 + metrics: 9419 + +## @param initScripts Dictionary of init scripts. Evaluated as a template. +## Specify dictionary of scripts to be run at first boot +## Alternatively, you can put your scripts under the files/docker-entrypoint-initdb.d directory +## For example: +## initScripts: +## my_init_script.sh: | +## #!/bin/sh +## echo "Do something." +## +initScripts: {} +## @param initScriptsCM ConfigMap with the init scripts. Evaluated as a template. +## Note: This will override initScripts +## +initScriptsCM: "" +## @param initScriptsSecret Secret containing `/docker-entrypoint-initdb.d` scripts to be executed at initialization time that contain sensitive data. Evaluated as a template. +## +initScriptsSecret: "" +## @param extraContainerPorts Extra ports to be included in container spec, primarily informational +## E.g: +## extraContainerPorts: +## - name: new_port_name +## containerPort: 1234 +## +extraContainerPorts: [] +## @param configuration [string] RabbitMQ Configuration file content: required cluster configuration +## Do not override unless you know what you are doing.
+## To add more configuration, use `extraConfiguration` of `advancedConfiguration` instead +## +configuration: |- + ## Username and password + ## + default_user = {{ .Values.auth.username }} + {{- if and (not .Values.auth.securePassword) .Values.auth.password }} + default_pass = {{ .Values.auth.password }} + {{- end }} + {{- if .Values.clustering.enabled }} + ## Clustering + ## + cluster_formation.peer_discovery_backend = rabbit_peer_discovery_k8s + cluster_formation.k8s.host = kubernetes.default + cluster_formation.node_cleanup.interval = 10 + cluster_formation.node_cleanup.only_log_warning = true + cluster_partition_handling = {{ .Values.clustering.partitionHandling }} + {{- end }} + {{- if .Values.loadDefinition.enabled }} + load_definitions = {{ .Values.loadDefinition.file }} + {{- end }} + # queue master locator + queue_master_locator = min-masters + # enable guest user + loopback_users.guest = false + {{ tpl .Values.extraConfiguration . }} + {{- if .Values.auth.tls.enabled }} + ssl_options.verify = {{ .Values.auth.tls.sslOptionsVerify }} + listeners.ssl.default = {{ .Values.service.ports.amqpTls }} + ssl_options.fail_if_no_peer_cert = {{ .Values.auth.tls.failIfNoPeerCert }} + ssl_options.cacertfile = /opt/bitnami/rabbitmq/certs/ca_certificate.pem + ssl_options.certfile = /opt/bitnami/rabbitmq/certs/server_certificate.pem + ssl_options.keyfile = /opt/bitnami/rabbitmq/certs/server_key.pem + {{- end }} + {{- if .Values.ldap.enabled }} + auth_backends.1.authn = ldap + auth_backends.1.authz = {{ ternary "ldap" "internal" .Values.ldap.authorisationEnabled }} + auth_backends.2 = internal + {{- $host := list }} + {{- $port := ternary 636 389 .Values.ldap.tls.enabled }} + {{- if .Values.ldap.uri }} + {{- $hostPort := get (urlParse .Values.ldap.uri) "host" }} + {{- $host = list (index (splitList ":" $hostPort) 0) -}} + {{- if (contains ":" $hostPort) }} + {{- $port = index (splitList ":" $hostPort) 1 -}} + {{- end }} + {{- end }} + {{- range $index, $server := concat $host .Values.ldap.servers }} + auth_ldap.servers.{{ add $index 1 }} = {{ $server }} + {{- end }} + auth_ldap.port = {{ coalesce .Values.ldap.port $port }} + {{- if or .Values.ldap.user_dn_pattern .Values.ldap.userDnPattern }} + auth_ldap.user_dn_pattern = {{ coalesce .Values.ldap.user_dn_pattern .Values.ldap.userDnPattern }} + {{- end }} + {{- if .Values.ldap.basedn }} + auth_ldap.dn_lookup_base = {{ .Values.ldap.basedn }} + {{- end }} + {{- if .Values.ldap.uidField }} + auth_ldap.dn_lookup_attribute = {{ .Values.ldap.uidField }} + {{- end }} + {{- if .Values.ldap.binddn }} + auth_ldap.dn_lookup_bind.user_dn = {{ .Values.ldap.binddn }} + auth_ldap.dn_lookup_bind.password = {{ required "'ldap.bindpw' is required when 'ldap.binddn' is defined" .Values.ldap.bindpw }} + {{- end }} + {{- if .Values.ldap.tls.enabled }} + auth_ldap.use_ssl = {{ not .Values.ldap.tls.startTls }} + auth_ldap.use_starttls = {{ .Values.ldap.tls.startTls }} + {{- if .Values.ldap.tls.CAFilename }} + auth_ldap.ssl_options.cacertfile = {{ .Values.ldap.tls.certificatesMountPath }}/{{ .Values.ldap.tls.CAFilename }} + {{- end }} + {{- if .Values.ldap.tls.certFilename }} + auth_ldap.ssl_options.certfile = {{ .Values.ldap.tls.certificatesMountPath }}/{{ .Values.ldap.tls.certFilename }} + auth_ldap.ssl_options.keyfile = {{ .Values.ldap.tls.certificatesMountPath }}/{{ required "'ldap.tls.certKeyFilename' is required when 'ldap.tls.certFilename' is defined" .Values.ldap.tls.certKeyFilename }} + {{- end }} + {{- if .Values.ldap.tls.skipVerify }} + 
auth_ldap.ssl_options.verify = verify_none + auth_ldap.ssl_options.fail_if_no_peer_cert = false + {{- else if .Values.ldap.tls.verify }} + auth_ldap.ssl_options.verify = {{ .Values.ldap.tls.verify }} + {{- end }} + {{- end }} + {{- end }} + {{- if .Values.metrics.enabled }} + ## Prometheus metrics + ## + prometheus.tcp.port = 9419 + {{- end }} + {{- if .Values.memoryHighWatermark.enabled }} + ## Memory Threshold + ## + total_memory_available_override_value = {{ include "rabbitmq.toBytes" .Values.resources.limits.memory }} + vm_memory_high_watermark.{{ .Values.memoryHighWatermark.type }} = {{ .Values.memoryHighWatermark.value }} + {{- end }} + +## @param extraConfiguration [string] Configuration file content: extra configuration to be appended to RabbitMQ configuration +## Use this instead of `configuration` to add more configuration +## +extraConfiguration: |- + #default_vhost = {{ .Release.Namespace }}-vhost + #disk_free_limit.absolute = 50MB + +## @param advancedConfiguration Configuration file content: advanced configuration +## Use this as additional configuration in classic config format (Erlang term configuration format) +## +## LDAP authorisation example: +## advancedConfiguration: |- +## [{rabbitmq_auth_backend_ldap,[ +## {tag_queries, [{administrator, {constant, true}}, +## {management, {constant, true}}]} +## ]}]. +## +advancedConfiguration: |- + +## LDAP configuration +## +ldap: + ## @param ldap.enabled Enable LDAP support + ## + enabled: false + ## @param ldap.uri LDAP connection string. + ## + uri: "" + ## @param ldap.servers List of LDAP server hostnames. This is valid only if ldap.uri is not set + ## + servers: [] + ## @param ldap.port LDAP server port. This is valid only if ldap.uri is not set + ## + port: "" + + ## DEPRECATED: ldap.user_dn_pattern will be removed in a future release, please use userDnPattern instead + ## Pattern used to translate the provided username into a value to be used for the LDAP bind + ## @param ldap.userDnPattern Pattern used to translate the provided username into a value to be used for the LDAP bind. + ## ref: https://www.rabbitmq.com/ldap.html#usernames-and-dns + ## + userDnPattern: "" + ## @param ldap.binddn DN of the account used to search in the LDAP server. + ## + binddn: "" + ## @param ldap.bindpw Password for binddn account. + ## + bindpw: "" + ## @param ldap.basedn Base DN path where binddn account will search for the users. + ## + basedn: "" + ## @param ldap.uidField Field used to match with the user name (uid, samAccountName, cn, etc). It matches with 'dn_lookup_attribute' in RabbitMQ configuration + ## ref: https://www.rabbitmq.com/ldap.html#usernames-and-dns + ## + uidField: "" + ## @param ldap.authorisationEnabled Enable LDAP authorisation. Please set 'advancedConfiguration' with tag, topic, resources and vhost mappings + ## ref: https://www.rabbitmq.com/ldap.html#authorisation + ## + authorisationEnabled: false + ## @param ldap.tls.enabled Enable TLS configuration. + ## @param ldap.tls.startTls Use STARTTLS instead of LDAPS. + ## @param ldap.tls.skipVerify Skip any SSL verification (hostnames or certificates) + ## @param ldap.tls.verify Verify connection. Valid values are 'verify_peer' or 'verify_none' + ## @param ldap.tls.certificatesMountPath Where LDAP certificates are mounted. + ## @param ldap.tls.certificatesSecret Secret with LDAP certificates.
+ ## @param ldap.tls.CAFilename CA certificate filename. Should match with the CA entry key in the ldap.tls.certificatesSecret. + ## @param ldap.tls.certFilename Client certificate filename to authenticate against the LDAP server. Should match with the certificate entry key in the ldap.tls.certificatesSecret. + ## @param ldap.tls.certKeyFilename Client key filename to authenticate against the LDAP server. Should match with the corresponding entry key in the ldap.tls.certificatesSecret. + ## + tls: + enabled: false + startTls: false + skipVerify: false + verify: "verify_peer" + certificatesMountPath: /opt/bitnami/rabbitmq/ldap/certs + certificatesSecret: "" + CAFilename: "" + certFilename: "" + certKeyFilename: "" + +## @param extraVolumeMounts Optionally specify extra list of additional volumeMounts +## Examples: +## extraVolumeMounts: +## - name: extras +## mountPath: /usr/share/extras +## readOnly: true +## +extraVolumeMounts: [] +## @param extraVolumes Optionally specify extra list of additional volumes. +## Example: +## extraVolumes: +## - name: extras +## emptyDir: {} +## +extraVolumes: [] +## @param extraSecrets Optionally specify extra secrets to be created by the chart. +## This can be useful when combined with load_definitions to automatically create the secret containing the definitions to be loaded. +## Example: +## extraSecrets: +## load-definition: +## load_definition.json: | +## { +## ... +## } +## +extraSecrets: {} +## @param extraSecretsPrependReleaseName Set this flag to true if extraSecrets should be created with the release name prepended. +## +extraSecretsPrependReleaseName: false + +## @section Statefulset parameters +## + +## @param replicaCount Number of RabbitMQ replicas to deploy +## +replicaCount: 1 +## @param schedulerName Use an alternate scheduler, e.g. "stork". +## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ +## +schedulerName: "" +## RabbitMQ should be initialized one by one when building the cluster for the first time. +## Therefore, the default value of podManagementPolicy is 'OrderedReady' +## Once a RabbitMQ node participates in the cluster, it waits at reboot for a response from another +## RabbitMQ node in the same cluster, except for the last RabbitMQ node of that cluster. +## If the cluster exits gracefully, you do not need to change the podManagementPolicy +## because the first RabbitMQ of the statefulset will always be the last of the cluster. +## However, if the last RabbitMQ of the cluster is not the first RabbitMQ due to a failure, +## you must change podManagementPolicy to 'Parallel'. +## ref: https://www.rabbitmq.com/clustering.html#restarting +## @param podManagementPolicy Pod management policy +## +podManagementPolicy: OrderedReady +## @param podLabels RabbitMQ Pod labels. Evaluated as a template +## Ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ +## +podLabels: {} +## @param podAnnotations RabbitMQ Pod annotations. Evaluated as a template +## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ +## +podAnnotations: {} +## @param updateStrategy.type Update strategy type for RabbitMQ statefulset +## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies +## +updateStrategy: + ## StrategyType + ## Can be set to RollingUpdate or OnDelete + ## + type: RollingUpdate +## @param statefulsetLabels RabbitMQ statefulset labels.
Evaluated as a template +## Ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ +## +statefulsetLabels: {} +## @param priorityClassName Name of the priority class to be used by RabbitMQ pods, priority class needs to be created beforehand +## Ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ +## +priorityClassName: "" +## @param podAffinityPreset Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` +## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity +## +podAffinityPreset: "" +## @param podAntiAffinityPreset Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` +## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity +## +podAntiAffinityPreset: soft + +## Node affinity preset +## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity +## +nodeAffinityPreset: + ## @param nodeAffinityPreset.type Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` + ## + type: "" + ## @param nodeAffinityPreset.key Node label key to match Ignored if `affinity` is set. + ## E.g. + ## key: "kubernetes.io/e2e-az-name" + ## + key: "" + ## @param nodeAffinityPreset.values Node label values to match. Ignored if `affinity` is set. + ## E.g. + ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: [] + +## @param affinity Affinity for pod assignment. Evaluated as a template +## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity +## Note: podAffinityPreset, podAntiAffinityPreset, and nodeAffinityPreset will be ignored when it's set +## +affinity: {} +## @param nodeSelector Node labels for pod assignment. Evaluated as a template +## ref: https://kubernetes.io/docs/user-guide/node-selection/ +## +nodeSelector: {} +## @param tolerations Tolerations for pod assignment. Evaluated as a template +## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ +## +tolerations: [] +## @param topologySpreadConstraints Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. 
Evaluated as a template +## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#spread-constraints-for-pods +## +topologySpreadConstraints: [] + +## RabbitMQ pods' Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod +## @param podSecurityContext.enabled Enable RabbitMQ pods' Security Context +## @param podSecurityContext.fsGroup Set RabbitMQ pod's Security Context fsGroup +## +podSecurityContext: + enabled: true + fsGroup: 1001 + +## @param containerSecurityContext.enabled Enabled RabbitMQ containers' Security Context +## @param containerSecurityContext.runAsUser Set RabbitMQ containers' Security Context runAsUser +## @param containerSecurityContext.runAsNonRoot Set RabbitMQ container's Security Context runAsNonRoot +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container +## Example: +## containerSecurityContext: +## capabilities: +## drop: ["NET_RAW"] +## readOnlyRootFilesystem: true +## +containerSecurityContext: + enabled: true + runAsUser: 1001 + runAsNonRoot: true + +## RabbitMQ containers' resource requests and limits +## ref: https://kubernetes.io/docs/user-guide/compute-resources/ +## We usually recommend not to specify default resources and to leave this as a conscious +## choice for the user. This also increases chances charts run on environments with little +## resources, such as Minikube. If you do want to specify resources, uncomment the following +## lines, adjust them as necessary, and remove the curly braces after 'resources:'. +## @param resources.limits The resources limits for RabbitMQ containers +## @param resources.requests The requested resources for RabbitMQ containers +## +resources: + ## Example: + ## limits: + ## cpu: 1000m + ## memory: 2Gi + ## + limits: {} + ## Examples: + ## requests: + ## cpu: 1000m + ## memory: 2Gi + ## + requests: {} + +## Configure RabbitMQ containers' extra options for liveness probe +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes +## @param livenessProbe.enabled Enable livenessProbe +## @param livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe +## @param livenessProbe.periodSeconds Period seconds for livenessProbe +## @param livenessProbe.timeoutSeconds Timeout seconds for livenessProbe +## @param livenessProbe.failureThreshold Failure threshold for livenessProbe +## @param livenessProbe.successThreshold Success threshold for livenessProbe +## +livenessProbe: + enabled: true + initialDelaySeconds: 120 + timeoutSeconds: 20 + periodSeconds: 30 + failureThreshold: 6 + successThreshold: 1 +## Configure RabbitMQ containers' extra options for readiness probe +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes +## @param readinessProbe.enabled Enable readinessProbe +## @param readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe +## @param readinessProbe.periodSeconds Period seconds for readinessProbe +## @param readinessProbe.timeoutSeconds Timeout seconds for readinessProbe +## @param readinessProbe.failureThreshold Failure threshold for readinessProbe +## @param readinessProbe.successThreshold Success threshold for readinessProbe +## +readinessProbe: + enabled: true + initialDelaySeconds: 10 + timeoutSeconds: 20 + periodSeconds: 30 + failureThreshold: 3 + successThreshold: 1 
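+
+## For reference, a minimal override sketch (illustrative values only, not chart defaults) showing how the
+## resource limits and probe timings documented above could be tuned from a custom values file:
+## resources:
+##   limits:
+##     memory: 2Gi
+## livenessProbe:
+##   initialDelaySeconds: 180
+## readinessProbe:
+##   periodSeconds: 60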
+ +## Configure RabbitMQ containers' extra options for startup probe +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes +## @param startupProbe.enabled Enable startupProbe +## @param startupProbe.initialDelaySeconds Initial delay seconds for startupProbe +## @param startupProbe.periodSeconds Period seconds for startupProbe +## @param startupProbe.timeoutSeconds Timeout seconds for startupProbe +## @param startupProbe.failureThreshold Failure threshold for startupProbe +## @param startupProbe.successThreshold Success threshold for startupProbe +## +startupProbe: + enabled: false + initialDelaySeconds: 10 + timeoutSeconds: 20 + periodSeconds: 30 + failureThreshold: 3 + successThreshold: 1 + +## @param customLivenessProbe Override default liveness probe +## +customLivenessProbe: {} +## @param customReadinessProbe Override default readiness probe +## +customReadinessProbe: {} +## @param customStartupProbe Define a custom startup probe +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#define-startup-probes +## +customStartupProbe: {} +## @param initContainers Add init containers to the RabbitMQ pod +## Example: +## initContainers: +## - name: your-image-name +## image: your-image +## imagePullPolicy: Always +## ports: +## - name: portname +## containerPort: 1234 +## +initContainers: [] +## @param sidecars Add sidecar containers to the RabbitMQ pod +## Example: +## sidecars: +## - name: your-image-name +## image: your-image +## imagePullPolicy: Always +## ports: +## - name: portname +## containerPort: 1234 +## +sidecars: [] + +## Pod Disruption Budget configuration +## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/ +## +pdb: + ## @param pdb.create Enable/disable a Pod Disruption Budget creation + ## + create: false + ## @param pdb.minAvailable Minimum number/percentage of pods that should remain scheduled + ## + minAvailable: 1 + ## @param pdb.maxUnavailable Maximum number/percentage of pods that may be made unavailable + ## + maxUnavailable: "" + +## @section RBAC parameters +## + +## RabbitMQ pods ServiceAccount +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ +## +serviceAccount: + ## @param serviceAccount.create Enable creation of ServiceAccount for RabbitMQ pods + ## + create: true + ## @param serviceAccount.name Name of the created serviceAccount + ## If not set and create is true, a name is generated using the rabbitmq.fullname template + ## + name: "" + ## @param serviceAccount.automountServiceAccountToken Auto-mount the service account token in the pod + ## + automountServiceAccountToken: true + ## @param serviceAccount.annotations Annotations for service account. Evaluated as a template. Only used if `create` is `true`. 
+ ## + annotations: {} + +## Role Based Access +## ref: https://kubernetes.io/docs/admin/authorization/rbac/ +## +rbac: + ## @param rbac.create Whether RBAC rules should be created + ## binding RabbitMQ ServiceAccount to a role + ## that allows RabbitMQ pods querying the K8s API + ## + create: true + +## @section Persistence parameters +## + +persistence: + ## @param persistence.enabled Enable RabbitMQ data persistence using PVC + ## + enabled: true + ## @param persistence.storageClass PVC Storage Class for RabbitMQ data volume + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + storageClass: "" + ## @param persistence.selector Selector to match an existing Persistent Volume + ## selector: + ## matchLabels: + ## app: my-app + ## + selector: {} + ## @param persistence.accessModes PVC Access Modes for RabbitMQ data volume + ## + accessModes: + - ReadWriteOnce + ## @param persistence.existingClaim Provide an existing PersistentVolumeClaims + ## The value is evaluated as a template + ## So, for example, the name can depend on .Release or .Chart + ## + existingClaim: "" + ## @param persistence.mountPath The path the volume will be mounted at + ## Note: useful when using custom RabbitMQ images + ## + mountPath: /bitnami/rabbitmq/mnesia + ## @param persistence.subPath The subdirectory of the volume to mount to + ## Useful in dev environments and one PV for multiple services + ## + subPath: "" + ## @param persistence.size PVC Storage Request for RabbitMQ data volume + ## If you change this value, you might have to adjust `rabbitmq.diskFreeLimit` as well + ## + size: 8Gi + ## @param persistence.annotations Persistence annotations. Evaluated as a template + ## Example: + ## annotations: + ## example.io/disk-volume-type: SSD + ## + annotations: {} + +## @section Exposure parameters +## + +## Kubernetes service type +## +service: + ## @param service.type Kubernetes Service type + ## + type: ClusterIP + + ## @param service.portEnabled Amqp port. Cannot be disabled when `auth.tls.enabled` is `false`. Listener can be disabled with `listeners.tcp = none`. 
+ ## + portEnabled: true + ## @param service.distPortEnabled Erlang distribution server port + ## + distPortEnabled: true + ## @param service.managerPortEnabled RabbitMQ Manager port + ## ref: https://github.com/bitnami/containers/tree/main/bitnami/rabbitmq#environment-variables + ## + managerPortEnabled: true + ## @param service.epmdPortEnabled RabbitMQ EPMD Discovery service port + ## + epmdPortEnabled: true + ## Service ports + ## @param service.ports.amqp Amqp service port + ## @param service.ports.amqpTls Amqp TLS service port + ## @param service.ports.dist Erlang distribution service port + ## @param service.ports.manager RabbitMQ Manager service port + ## @param service.ports.metrics RabbitMQ Prometheus metrics service port + ## @param service.ports.epmd EPMD Discovery service port + ## + ports: + amqp: 5672 + amqpTls: 5671 + dist: 25672 + manager: 15672 + metrics: 9419 + epmd: 4369 + ## Service port names + ## @param service.portNames.amqp Amqp service port name + ## @param service.portNames.amqpTls Amqp TLS service port name + ## @param service.portNames.dist Erlang distribution service port name + ## @param service.portNames.manager RabbitMQ Manager service port name + ## @param service.portNames.metrics RabbitMQ Prometheus metrics service port name + ## @param service.portNames.epmd EPMD Discovery service port name + ## + portNames: + amqp: "amqp" + amqpTls: "amqp-ssl" + dist: "dist" + manager: "http-stats" + metrics: "metrics" + epmd: "epmd" + + ## Node ports to expose + ## @param service.nodePorts.amqp Node port for Amqp + ## @param service.nodePorts.amqpTls Node port for Amqp TLS + ## @param service.nodePorts.dist Node port for Erlang distribution + ## @param service.nodePorts.manager Node port for RabbitMQ Manager + ## @param service.nodePorts.epmd Node port for EPMD Discovery + ## @param service.nodePorts.metrics Node port for RabbitMQ Prometheus metrics + ## + nodePorts: + amqp: "" + amqpTls: "" + dist: "" + manager: "" + epmd: "" + metrics: "" + ## @param service.extraPorts Extra ports to expose in the service + ## E.g.: + ## extraPorts: + ## - name: new_svc_name + ## port: 1234 + ## targetPort: 1234 + ## + extraPorts: [] + ## @param service.loadBalancerSourceRanges Address(es) that are allowed when service is `LoadBalancer` + ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## e.g: + ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## @param service.externalIPs Set the ExternalIPs + ## + externalIPs: [] + ## @param service.externalTrafficPolicy Enable client source IP preservation + ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Cluster + ## @param service.loadBalancerIP Set the LoadBalancerIP + ## + loadBalancerIP: "" + ## @param service.clusterIP Kubernetes service Cluster IP + ## e.g.: + ## clusterIP: None + ## + clusterIP: "" + ## @param service.labels Service labels. Evaluated as a template + ## + labels: {} + ## @param service.annotations Service annotations. Evaluated as a template + ## Example: + ## annotations: + ## service.beta.kubernetes.io/aws-load-balancer-internal: 0.0.0.0/0 + ## + annotations: {} + ## @param service.annotationsHeadless Headless Service annotations.
Evaluated as a template + ## Example: + ## annotations: + ## external-dns.alpha.kubernetes.io/internal-hostname: rabbitmq.example.com + ## + annotationsHeadless: {} + ## @param service.sessionAffinity Session Affinity for Kubernetes service, can be "None" or "ClientIP" + ## If "ClientIP", consecutive client requests will be directed to the same Pod + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + ## + sessionAffinity: None + ## @param service.sessionAffinityConfig Additional settings for the sessionAffinity + ## sessionAffinityConfig: + ## clientIP: + ## timeoutSeconds: 300 + ## + sessionAffinityConfig: {} + +## Configure the ingress resource that allows you to access the +## RabbitMQ installation. Set up the URL +## ref: https://kubernetes.io/docs/user-guide/ingress/ +## +ingress: + ## @param ingress.enabled Enable ingress resource for Management console + ## + enabled: false + + ## @param ingress.path Path for the default host. You may need to set this to '/*' in order to use this with ALB ingress controllers. + ## + path: / + + ## @param ingress.pathType Ingress path type + ## + pathType: ImplementationSpecific + ## @param ingress.hostname Default host for the ingress resource + ## + hostname: rabbitmq.local + ## @param ingress.annotations Additional annotations for the Ingress resource. To enable certificate autogeneration, place here your cert-manager annotations. + ## For a full list of possible ingress annotations, please see + ## ref: https://github.com/kubernetes/ingress-nginx/blob/master/docs/user-guide/nginx-configuration/annotations.md + ## Use this parameter to set the required annotations for cert-manager, see + ## ref: https://cert-manager.io/docs/usage/ingress/#supported-annotations + ## + ## e.g: + ## annotations: + ## kubernetes.io/ingress.class: nginx + ## cert-manager.io/cluster-issuer: cluster-issuer-name + ## + annotations: {} + ## @param ingress.tls Enable TLS configuration for the hostname defined at `ingress.hostname` parameter + ## TLS certificates will be retrieved from a TLS secret with name: {{- printf "%s-tls" .Values.ingress.hostname }} + ## You can: + ## - Use the `ingress.secrets` parameter to create this TLS secret + ## - Rely on cert-manager to create it by setting the corresponding annotations + ## - Rely on Helm to create self-signed certificates by setting `ingress.selfSigned=true` + ## + tls: false + ## @param ingress.selfSigned Set this to true in order to create a TLS secret for this ingress record + ## using self-signed certificates generated by Helm + ## + selfSigned: false + ## @param ingress.extraHosts The list of additional hostnames to be covered with this ingress record. + ## Most likely the hostname above will be enough, but in the event more hosts are needed, this is an array + ## e.g: + ## extraHosts: + ## - name: rabbitmq.local + ## path: / + ## + extraHosts: [] + ## @param ingress.extraPaths An array with additional arbitrary paths that may need to be added to the ingress under the main host + ## e.g: + ## extraPaths: + ## - path: /* + ## backend: + ## serviceName: ssl-redirect + ## servicePort: use-annotation + ## + extraPaths: [] + ## @param ingress.extraRules The list of additional rules to be added to this ingress record. Evaluated as a template + ## Useful when looking for additional customization, such as using different backend + ## + extraRules: [] + ## @param ingress.extraTls The tls configuration for additional hostnames to be covered with this ingress record. 
+ ## see: https://kubernetes.io/docs/concepts/services-networking/ingress/#tls + ## e.g: + ## extraTls: + ## - hosts: + ## - rabbitmq.local + ## secretName: rabbitmq.local-tls + ## + extraTls: [] + ## @param ingress.secrets Custom TLS certificates as secrets + ## NOTE: 'key' and 'certificate' are expected in PEM format + ## NOTE: 'name' should line up with a 'secretName' set further up + ## If it is not set and you're using cert-manager, this is unneeded, as it will create a secret for you with valid certificates + ## If it is not set and you're NOT using cert-manager either, self-signed certificates will be created valid for 365 days + ## It is also possible to create and manage the certificates outside of this helm chart + ## Please see README.md for more information + ## e.g: + ## secrets: + ## - name: rabbitmq.local-tls + ## key: |- + ## -----BEGIN RSA PRIVATE KEY----- + ## ... + ## -----END RSA PRIVATE KEY----- + ## certificate: |- + ## -----BEGIN CERTIFICATE----- + ## ... + ## -----END CERTIFICATE----- + ## + secrets: [] + ## @param ingress.ingressClassName IngressClass that will be used to implement the Ingress (Kubernetes 1.18+) + ## This is supported in Kubernetes 1.18+ and required if you have more than one IngressClass marked as the default for your cluster. + ## ref: https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/ + ## + ingressClassName: "" + ## @param ingress.existingSecret Use an existing secret that contains your own TLS certificate. + existingSecret: "" + +## Network Policy configuration +## ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/ +## +networkPolicy: + ## @param networkPolicy.enabled Enable creation of NetworkPolicy resources + ## + enabled: false + ## @param networkPolicy.allowExternal Don't require client label for connections + ## The Policy model to apply. When set to false, only pods with the correct + ## client label will have network access to the ports RabbitMQ is listening + ## on. When true, RabbitMQ will accept connections from any source + ## (with the correct destination port). + ## + allowExternal: true + ## @param networkPolicy.additionalRules Additional NetworkPolicy Ingress "from" rules to set. Note that all rules are OR-ed.
+ ## e.g: + ## additionalRules: + ## - matchLabels: + ## - role: frontend + ## - matchExpressions: + ## - key: role + ## operator: In + ## values: + ## - frontend + ## + additionalRules: [] + +## @section Metrics Parameters +## + +## Prometheus Metrics +## +metrics: + ## @param metrics.enabled Enable exposing RabbitMQ metrics to be gathered by Prometheus + ## + enabled: false + ## @param metrics.plugins Plugins to enable Prometheus metrics in RabbitMQ + ## + plugins: "rabbitmq_prometheus" + ## Prometheus pod annotations + ## @param metrics.podAnnotations [object] Annotations for enabling prometheus to access the metrics endpoint + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + ## + podAnnotations: + prometheus.io/scrape: "true" + prometheus.io/port: "{{ .Values.service.ports.metrics }}" + ## Prometheus Service Monitor + ## ref: https://github.com/coreos/prometheus-operator + ## + serviceMonitor: + ## @param metrics.serviceMonitor.enabled Create ServiceMonitor Resource for scraping metrics using PrometheusOperator + ## + enabled: false + ## @param metrics.serviceMonitor.namespace Specify the namespace in which the serviceMonitor resource will be created + ## + namespace: "" + ## @param metrics.serviceMonitor.interval Specify the interval at which metrics should be scraped + ## + interval: 30s + ## @param metrics.serviceMonitor.scrapeTimeout Specify the timeout after which the scrape is ended + ## e.g: + ## scrapeTimeout: 30s + ## + scrapeTimeout: "" + ## @param metrics.serviceMonitor.jobLabel The name of the label on the target service to use as the job name in prometheus. + ## + jobLabel: "" + ## @param metrics.serviceMonitor.relabelings RelabelConfigs to apply to samples before scraping. + ## + relabelings: [] + ## @param metrics.serviceMonitor.metricRelabelings MetricsRelabelConfigs to apply to samples before ingestion. 
+ ## + metricRelabelings: [] + ## @param metrics.serviceMonitor.honorLabels honorLabels chooses the metric's labels on collisions with target labels + ## + honorLabels: false + ## @param metrics.serviceMonitor.targetLabels Used to keep given service's labels in target + ## e.g: + ## - app.kubernetes.io/name + ## + targetLabels: {} + ## @param metrics.serviceMonitor.podTargetLabels Used to keep given pod's labels in target + ## e.g: + ## - app.kubernetes.io/name + ## + podTargetLabels: {} + ## @param metrics.serviceMonitor.path Define the path used by ServiceMonitor to scrap metrics + ## Could be /metrics for aggregated metrics or /metrics/per-object for more details + ## + path: "" + ## @param metrics.serviceMonitor.selector ServiceMonitor selector labels + ## ref: https://github.com/bitnami/charts/tree/main/bitnami/prometheus-operator#prometheus-configuration + ## + ## selector: + ## prometheus: my-prometheus + ## + selector: {} + ## @param metrics.serviceMonitor.labels Extra labels for the ServiceMonitor + ## + labels: {} + ## @param metrics.serviceMonitor.annotations Extra annotations for the ServiceMonitor + ## + annotations: {} + + ## Custom PrometheusRule to be defined + ## The value is evaluated as a template, so, for example, the value can depend on .Release or .Chart + ## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions + ## + prometheusRule: + ## @param metrics.prometheusRule.enabled Set this to true to create prometheusRules for Prometheus operator + ## + enabled: false + ## @param metrics.prometheusRule.additionalLabels Additional labels that can be used so prometheusRules will be discovered by Prometheus + ## + additionalLabels: {} + ## @param metrics.prometheusRule.namespace namespace where prometheusRules resource should be created + ## + namespace: "" + ## List of rules, used as template by Helm. + ## @param metrics.prometheusRule.rules List of rules, used as template by Helm. + ## These are just examples rules inspired from https://awesome-prometheus-alerts.grep.to/rules.html + ## rules: + ## - alert: RabbitmqDown + ## expr: rabbitmq_up{service="{{ template "common.names.fullname" . }}"} == 0 + ## for: 5m + ## labels: + ## severity: error + ## annotations: + ## summary: Rabbitmq down (instance {{ "{{ $labels.instance }}" }}) + ## description: RabbitMQ node down + ## - alert: ClusterDown + ## expr: | + ## sum(rabbitmq_running{service="{{ template "common.names.fullname" . }}"}) + ## < {{ .Values.replicaCount }} + ## for: 5m + ## labels: + ## severity: error + ## annotations: + ## summary: Cluster down (instance {{ "{{ $labels.instance }}" }}) + ## description: | + ## Less than {{ .Values.replicaCount }} nodes running in RabbitMQ cluster + ## VALUE = {{ "{{ $value }}" }} + ## - alert: ClusterPartition + ## expr: rabbitmq_partitions{service="{{ template "common.names.fullname" . }}"} > 0 + ## for: 5m + ## labels: + ## severity: error + ## annotations: + ## summary: Cluster partition (instance {{ "{{ $labels.instance }}" }}) + ## description: | + ## Cluster partition + ## VALUE = {{ "{{ $value }}" }} + ## - alert: OutOfMemory + ## expr: | + ## rabbitmq_node_mem_used{service="{{ template "common.names.fullname" . }}"} + ## / rabbitmq_node_mem_limit{service="{{ template "common.names.fullname" . 
}}"} + ## * 100 > 90 + ## for: 5m + ## labels: + ## severity: warning + ## annotations: + ## summary: Out of memory (instance {{ "{{ $labels.instance }}" }}) + ## description: | + ## Memory available for RabbmitMQ is low (< 10%)\n VALUE = {{ "{{ $value }}" }} + ## LABELS: {{ "{{ $labels }}" }} + ## - alert: TooManyConnections + ## expr: rabbitmq_connectionsTotal{service="{{ template "common.names.fullname" . }}"} > 1000 + ## for: 5m + ## labels: + ## severity: warning + ## annotations: + ## summary: Too many connections (instance {{ "{{ $labels.instance }}" }}) + ## description: | + ## RabbitMQ instance has too many connections (> 1000) + ## VALUE = {{ "{{ $value }}" }}\n LABELS: {{ "{{ $labels }}" }} + ## + rules: [] + +## @section Init Container Parameters +## + +## Init Container parameters +## Change the owner and group of the persistent volume(s) mountpoint(s) to 'runAsUser:fsGroup' on each component +## values from the securityContext section of the component +## +volumePermissions: + ## @param volumePermissions.enabled Enable init container that changes the owner and group of the persistent volume(s) mountpoint to `runAsUser:fsGroup` + ## + enabled: false + ## @param volumePermissions.image.registry Init container volume-permissions image registry + ## @param volumePermissions.image.repository Init container volume-permissions image repository + ## @param volumePermissions.image.tag Init container volume-permissions image tag + ## @param volumePermissions.image.digest Init container volume-permissions image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag + ## @param volumePermissions.image.pullPolicy Init container volume-permissions image pull policy + ## @param volumePermissions.image.pullSecrets Specify docker-registry secret names as an array + ## + image: + registry: docker.io + repository: bitnami/bitnami-shell + tag: 11-debian-11-r56 + digest: "" + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: https://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets (secrets must be manually created in the namespace) + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## Example: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## Init Container resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## We usually recommend not to specify default resources and to leave this as a conscious + ## choice for the user. This also increases chances charts run on environments with little + ## resources, such as Minikube. If you do want to specify resources, uncomment the following + ## lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
+ ## @param volumePermissions.resources.limits Init container volume-permissions resource limits + ## @param volumePermissions.resources.requests Init container volume-permissions resource requests + ## + resources: + ## Example: + ## limits: + ## cpu: 100m + ## memory: 128Mi + ## + limits: {} + ## Examples: + ## requests: + ## cpu: 100m + ## memory: 128Mi + ## + requests: {} + ## Init container' Security Context + ## Note: the chown of the data folder is done to containerSecurityContext.runAsUser + ## and not the below volumePermissions.containerSecurityContext.runAsUser + ## @param volumePermissions.containerSecurityContext.runAsUser User ID for the init container + ## + containerSecurityContext: + runAsUser: 0 diff --git a/charts/modules/redis/.helmignore b/charts/modules/redis/.helmignore new file mode 100644 index 0000000..f0c1319 --- /dev/null +++ b/charts/modules/redis/.helmignore @@ -0,0 +1,21 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/charts/modules/redis/Chart.lock b/charts/modules/redis/Chart.lock new file mode 100644 index 0000000..ac00fca --- /dev/null +++ b/charts/modules/redis/Chart.lock @@ -0,0 +1,6 @@ +dependencies: +- name: common + repository: https://charts.bitnami.com/bitnami + version: 2.1.2 +digest: sha256:1c365a4551a2f4098e9584dc176b289c10437c679c7c3e2ec6153cabf863e1a4 +generated: "2022-11-03T05:41:14.817736977Z" diff --git a/charts/modules/redis/Chart.yaml b/charts/modules/redis/Chart.yaml new file mode 100644 index 0000000..d3e9d0e --- /dev/null +++ b/charts/modules/redis/Chart.yaml @@ -0,0 +1,26 @@ +annotations: + category: Database +apiVersion: v2 +appVersion: 7.0.5 +dependencies: +- name: common + repository: https://charts.bitnami.com/bitnami + tags: + - bitnami-common + version: 2.x.x +description: Redis(R) is an open source, advanced key-value store. It is often referred + to as a data structure server since keys can contain strings, hashes, lists, sets + and sorted sets. +home: https://github.com/bitnami/charts/tree/main/bitnami/redis +icon: https://bitnami.com/assets/stacks/redis/img/redis-stack-220x234.png +keywords: +- redis +- keyvalue +- database +maintainers: +- name: Bitnami + url: https://github.com/bitnami/charts +name: redis +sources: +- https://github.com/bitnami/containers/tree/main/bitnami/redis +version: 17.3.11 diff --git a/charts/modules/redis/README.md b/charts/modules/redis/README.md new file mode 100644 index 0000000..0c097f5 --- /dev/null +++ b/charts/modules/redis/README.md @@ -0,0 +1,918 @@ + + +# Bitnami package for Redis(R) + +Redis(R) is an open source, advanced key-value store. It is often referred to as a data structure server since keys can contain strings, hashes, lists, sets and sorted sets. + +[Overview of Redis®](http://redis.io) + +Disclaimer: Redis is a registered trademark of Redis Ltd. Any rights therein are reserved to Redis Ltd. Any use by Bitnami is for referential purposes only and does not indicate any sponsorship, endorsement, or affiliation between Redis Ltd. 
+ +## TL;DR + +```bash +$ helm repo add my-repo https://charts.bitnami.com/bitnami +$ helm install my-release my-repo/redis +``` + +## Introduction + +This chart bootstraps a [Redis®](https://github.com/bitnami/containers/tree/main/bitnami/redis) deployment on a [Kubernetes](https://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +Bitnami charts can be used with [Kubeapps](https://kubeapps.dev/) for deployment and management of Helm Charts in clusters. + +### Choose between Redis® Helm Chart and Redis® Cluster Helm Chart + +You can choose either of the two Redis® Helm charts for deploying a Redis® cluster. + +1. [Redis® Helm Chart](https://github.com/bitnami/charts/tree/main/bitnami/redis) will deploy a master-replica cluster, with the [option](https://github.com/bitnami/charts/tree/main/bitnami/redis#redis-sentinel-configuration-parameters) of enabling Redis® Sentinel. +2. [Redis® Cluster Helm Chart](https://github.com/bitnami/charts/tree/main/bitnami/redis-cluster) will deploy a Redis® Cluster topology with sharding. + +The main features of each chart are the following: + +| Redis® | Redis® Cluster | +|--------------------------------------------------------|------------------------------------------------------------------------| +| Supports multiple databases | Supports only one database. Better if you have a big dataset | +| Single write point (single master) | Multiple write points (multiple masters) | +| ![Redis® Topology](img/redis-topology.png) | ![Redis® Cluster Topology](img/redis-cluster-topology.png) | + +## Prerequisites + +- Kubernetes 1.19+ +- Helm 3.2.0+ +- PV provisioner support in the underlying infrastructure + +## Installing the Chart + +To install the chart with the release name `my-release`: + +```bash +$ helm install my-release my-repo/redis +``` + +The command deploys Redis® on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation. + +> **Tip**: List all releases using `helm list` + +## Uninstalling the Chart + +To uninstall/delete the `my-release` deployment: + +```bash +$ helm delete my-release +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release.
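+
+As a quick, illustrative example (not part of the upstream documentation; the values shown are arbitrary), a few of the parameters listed below can be collected in an override file and passed at install time with `helm install my-release my-repo/redis -f my-values.yaml`:
+
+```yaml
+# my-values.yaml -- illustrative overrides; parameter names come from the tables below
+architecture: replication   # `standalone` is also allowed
+auth:
+  password: "change-me"     # see `auth.password`
+master:
+  persistence:
+    size: 16Gi              # default is 8Gi
+```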
+ +## Parameters + +### Global parameters + +| Name | Description | Value | +| ------------------------- | ------------------------------------------------------ | ----- | +| `global.imageRegistry` | Global Docker image registry | `""` | +| `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]` | +| `global.storageClass` | Global StorageClass for Persistent Volume(s) | `""` | +| `global.redis.password` | Global Redis® password (overrides `auth.password`) | `""` | + + +### Common parameters + +| Name | Description | Value | +| ------------------------ | --------------------------------------------------------------------------------------- | --------------- | +| `kubeVersion` | Override Kubernetes version | `""` | +| `nameOverride` | String to partially override common.names.fullname | `""` | +| `fullnameOverride` | String to fully override common.names.fullname | `""` | +| `commonLabels` | Labels to add to all deployed objects | `{}` | +| `commonAnnotations` | Annotations to add to all deployed objects | `{}` | +| `secretAnnotations` | Annotations to add to secret | `{}` | +| `clusterDomain` | Kubernetes cluster domain name | `cluster.local` | +| `extraDeploy` | Array of extra objects to deploy with the release | `[]` | +| `diagnosticMode.enabled` | Enable diagnostic mode (all probes will be disabled and the command will be overridden) | `false` | +| `diagnosticMode.command` | Command to override all containers in the deployment | `["sleep"]` | +| `diagnosticMode.args` | Args to override all containers in the deployment | `["infinity"]` | + + +### Redis® Image parameters + +| Name | Description | Value | +| ------------------- | ---------------------------------------------------------------------------------------------------------- | --------------------- | +| `image.registry` | Redis® image registry | `docker.io` | +| `image.repository` | Redis® image repository | `bitnami/redis` | +| `image.tag` | Redis® image tag (immutable tags are recommended) | `7.0.5-debian-11-r15` | +| `image.digest` | Redis® image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` | +| `image.pullPolicy` | Redis® image pull policy | `IfNotPresent` | +| `image.pullSecrets` | Redis® image pull secrets | `[]` | +| `image.debug` | Enable image debug mode | `false` | + + +### Redis® common configuration parameters + +| Name | Description | Value | +| -------------------------------- | ------------------------------------------------------------------------------------- | ------------- | +| `architecture` | Redis® architecture. 
Allowed values: `standalone` or `replication` | `replication` | +| `auth.enabled` | Enable password authentication | `true` | +| `auth.sentinel` | Enable password authentication on sentinels too | `true` | +| `auth.password` | Redis® password | `""` | +| `auth.existingSecret` | The name of an existing secret with Redis® credentials | `""` | +| `auth.existingSecretPasswordKey` | Password key to be retrieved from existing secret | `""` | +| `auth.usePasswordFiles` | Mount credentials as files instead of using an environment variable | `false` | +| `commonConfiguration` | Common configuration to be added into the ConfigMap | `""` | +| `existingConfigmap` | The name of an existing ConfigMap with your custom configuration for Redis® nodes | `""` | + + +### Redis® master configuration parameters + +| Name | Description | Value | +| ---------------------------------------------------- | ----------------------------------------------------------------------------------------------------- | ------------------------ | +| `master.count` | Number of Redis® master instances to deploy (experimental, requires additional configuration) | `1` | +| `master.configuration` | Configuration for Redis® master nodes | `""` | +| `master.disableCommands` | Array with Redis® commands to disable on master nodes | `["FLUSHDB","FLUSHALL"]` | +| `master.command` | Override default container command (useful when using custom images) | `[]` | +| `master.args` | Override default container args (useful when using custom images) | `[]` | +| `master.preExecCmds` | Additional commands to run prior to starting Redis® master | `[]` | +| `master.extraFlags` | Array with additional command line flags for Redis® master | `[]` | +| `master.extraEnvVars` | Array with extra environment variables to add to Redis® master nodes | `[]` | +| `master.extraEnvVarsCM` | Name of existing ConfigMap containing extra env vars for Redis® master nodes | `""` | +| `master.extraEnvVarsSecret` | Name of existing Secret containing extra env vars for Redis® master nodes | `""` | +| `master.containerPorts.redis` | Container port to open on Redis® master nodes | `6379` | +| `master.startupProbe.enabled` | Enable startupProbe on Redis® master nodes | `false` | +| `master.startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `20` | +| `master.startupProbe.periodSeconds` | Period seconds for startupProbe | `5` | +| `master.startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `5` | +| `master.startupProbe.failureThreshold` | Failure threshold for startupProbe | `5` | +| `master.startupProbe.successThreshold` | Success threshold for startupProbe | `1` | +| `master.livenessProbe.enabled` | Enable livenessProbe on Redis® master nodes | `true` | +| `master.livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `20` | +| `master.livenessProbe.periodSeconds` | Period seconds for livenessProbe | `5` | +| `master.livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `5` | +| `master.livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `5` | +| `master.livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` | +| `master.readinessProbe.enabled` | Enable readinessProbe on Redis® master nodes | `true` | +| `master.readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `20` | +| `master.readinessProbe.periodSeconds` | Period seconds for readinessProbe | `5` | +| `master.readinessProbe.timeoutSeconds` | Timeout seconds for 
readinessProbe | `1` | +| `master.readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `5` | +| `master.readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` | +| `master.customStartupProbe` | Custom startupProbe that overrides the default one | `{}` | +| `master.customLivenessProbe` | Custom livenessProbe that overrides the default one | `{}` | +| `master.customReadinessProbe` | Custom readinessProbe that overrides the default one | `{}` | +| `master.resources.limits` | The resources limits for the Redis® master containers | `{}` | +| `master.resources.requests` | The requested resources for the Redis® master containers | `{}` | +| `master.podSecurityContext.enabled` | Enabled Redis® master pods' Security Context | `true` | +| `master.podSecurityContext.fsGroup` | Set Redis® master pod's Security Context fsGroup | `1001` | +| `master.containerSecurityContext.enabled` | Enabled Redis® master containers' Security Context | `true` | +| `master.containerSecurityContext.runAsUser` | Set Redis® master containers' Security Context runAsUser | `1001` | +| `master.kind` | Use either Deployment or StatefulSet (default) | `StatefulSet` | +| `master.schedulerName` | Alternate scheduler for Redis® master pods | `""` | +| `master.updateStrategy.type` | Redis® master statefulset strategy type | `RollingUpdate` | +| `master.priorityClassName` | Redis® master pods' priorityClassName | `""` | +| `master.hostAliases` | Redis® master pods host aliases | `[]` | +| `master.podLabels` | Extra labels for Redis® master pods | `{}` | +| `master.podAnnotations` | Annotations for Redis® master pods | `{}` | +| `master.shareProcessNamespace` | Share a single process namespace between all of the containers in Redis® master pods | `false` | +| `master.podAffinityPreset` | Pod affinity preset. Ignored if `master.affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `master.podAntiAffinityPreset` | Pod anti-affinity preset. Ignored if `master.affinity` is set. Allowed values: `soft` or `hard` | `soft` | +| `master.nodeAffinityPreset.type` | Node affinity preset type. Ignored if `master.affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `master.nodeAffinityPreset.key` | Node label key to match. Ignored if `master.affinity` is set | `""` | +| `master.nodeAffinityPreset.values` | Node label values to match. 
Ignored if `master.affinity` is set | `[]` | +| `master.affinity` | Affinity for Redis® master pods assignment | `{}` | +| `master.nodeSelector` | Node labels for Redis® master pods assignment | `{}` | +| `master.tolerations` | Tolerations for Redis® master pods assignment | `[]` | +| `master.topologySpreadConstraints` | Spread Constraints for Redis® master pod assignment | `[]` | +| `master.dnsPolicy` | DNS Policy for Redis® master pod | `""` | +| `master.dnsConfig` | DNS Configuration for Redis® master pod | `{}` | +| `master.lifecycleHooks` | for the Redis® master container(s) to automate configuration before or after startup | `{}` | +| `master.extraVolumes` | Optionally specify extra list of additional volumes for the Redis® master pod(s) | `[]` | +| `master.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the Redis® master container(s) | `[]` | +| `master.sidecars` | Add additional sidecar containers to the Redis® master pod(s) | `[]` | +| `master.initContainers` | Add additional init containers to the Redis® master pod(s) | `[]` | +| `master.persistence.enabled` | Enable persistence on Redis® master nodes using Persistent Volume Claims | `true` | +| `master.persistence.medium` | Provide a medium for `emptyDir` volumes. | `""` | +| `master.persistence.sizeLimit` | Set this to enable a size limit for `emptyDir` volumes. | `""` | +| `master.persistence.path` | The path the volume will be mounted at on Redis® master containers | `/data` | +| `master.persistence.subPath` | The subdirectory of the volume to mount on Redis® master containers | `""` | +| `master.persistence.subPathExpr` | Used to construct the subPath subdirectory of the volume to mount on Redis® master containers | `""` | +| `master.persistence.storageClass` | Persistent Volume storage class | `""` | +| `master.persistence.accessModes` | Persistent Volume access modes | `["ReadWriteOnce"]` | +| `master.persistence.size` | Persistent Volume size | `8Gi` | +| `master.persistence.annotations` | Additional custom annotations for the PVC | `{}` | +| `master.persistence.selector` | Additional labels to match for the PVC | `{}` | +| `master.persistence.dataSource` | Custom PVC data source | `{}` | +| `master.persistence.existingClaim` | Use a existing PVC which must be created manually before bound | `""` | +| `master.service.type` | Redis® master service type | `ClusterIP` | +| `master.service.ports.redis` | Redis® master service port | `6379` | +| `master.service.nodePorts.redis` | Node port for Redis® master | `""` | +| `master.service.externalTrafficPolicy` | Redis® master service external traffic policy | `Cluster` | +| `master.service.extraPorts` | Extra ports to expose (normally used with the `sidecar` value) | `[]` | +| `master.service.internalTrafficPolicy` | Redis® master service internal traffic policy (requires Kubernetes v1.22 or greater to be usable) | `Cluster` | +| `master.service.clusterIP` | Redis® master service Cluster IP | `""` | +| `master.service.loadBalancerIP` | Redis® master service Load Balancer IP | `""` | +| `master.service.loadBalancerSourceRanges` | Redis® master service Load Balancer sources | `[]` | +| `master.service.annotations` | Additional custom annotations for Redis® master service | `{}` | +| `master.service.sessionAffinity` | Session Affinity for Kubernetes service, can be "None" or "ClientIP" | `None` | +| `master.service.sessionAffinityConfig` | Additional settings for the sessionAffinity | `{}` | +| `master.terminationGracePeriodSeconds` | Integer 
setting the termination grace period for the redis-master pods | `30` | +| `master.serviceAccount.create` | Specifies whether a ServiceAccount should be created | `false` | +| `master.serviceAccount.name` | The name of the ServiceAccount to use. | `""` | +| `master.serviceAccount.automountServiceAccountToken` | Whether to auto mount the service account token | `true` | +| `master.serviceAccount.annotations` | Additional custom annotations for the ServiceAccount | `{}` | + + +### Redis® replicas configuration parameters + +| Name | Description | Value | +| ----------------------------------------------------- | ------------------------------------------------------------------------------------------------------- | ------------------------ | +| `replica.replicaCount` | Number of Redis® replicas to deploy | `3` | +| `replica.configuration` | Configuration for Redis® replicas nodes | `""` | +| `replica.disableCommands` | Array with Redis® commands to disable on replicas nodes | `["FLUSHDB","FLUSHALL"]` | +| `replica.command` | Override default container command (useful when using custom images) | `[]` | +| `replica.args` | Override default container args (useful when using custom images) | `[]` | +| `replica.preExecCmds` | Additional commands to run prior to starting Redis® replicas | `[]` | +| `replica.extraFlags` | Array with additional command line flags for Redis® replicas | `[]` | +| `replica.extraEnvVars` | Array with extra environment variables to add to Redis® replicas nodes | `[]` | +| `replica.extraEnvVarsCM` | Name of existing ConfigMap containing extra env vars for Redis® replicas nodes | `""` | +| `replica.extraEnvVarsSecret` | Name of existing Secret containing extra env vars for Redis® replicas nodes | `""` | +| `replica.externalMaster.enabled` | Use external master for bootstrapping | `false` | +| `replica.externalMaster.host` | External master host to bootstrap from | `""` | +| `replica.externalMaster.port` | Port for Redis service external master host | `6379` | +| `replica.containerPorts.redis` | Container port to open on Redis® replicas nodes | `6379` | +| `replica.startupProbe.enabled` | Enable startupProbe on Redis® replicas nodes | `true` | +| `replica.startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `10` | +| `replica.startupProbe.periodSeconds` | Period seconds for startupProbe | `10` | +| `replica.startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `5` | +| `replica.startupProbe.failureThreshold` | Failure threshold for startupProbe | `22` | +| `replica.startupProbe.successThreshold` | Success threshold for startupProbe | `1` | +| `replica.livenessProbe.enabled` | Enable livenessProbe on Redis® replicas nodes | `true` | +| `replica.livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `20` | +| `replica.livenessProbe.periodSeconds` | Period seconds for livenessProbe | `5` | +| `replica.livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `5` | +| `replica.livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `5` | +| `replica.livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` | +| `replica.readinessProbe.enabled` | Enable readinessProbe on Redis® replicas nodes | `true` | +| `replica.readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `20` | +| `replica.readinessProbe.periodSeconds` | Period seconds for readinessProbe | `5` | +| `replica.readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `1` | +| 
`replica.readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `5` | +| `replica.readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` | +| `replica.customStartupProbe` | Custom startupProbe that overrides the default one | `{}` | +| `replica.customLivenessProbe` | Custom livenessProbe that overrides the default one | `{}` | +| `replica.customReadinessProbe` | Custom readinessProbe that overrides the default one | `{}` | +| `replica.resources.limits` | The resources limits for the Redis® replicas containers | `{}` | +| `replica.resources.requests` | The requested resources for the Redis® replicas containers | `{}` | +| `replica.podSecurityContext.enabled` | Enabled Redis® replicas pods' Security Context | `true` | +| `replica.podSecurityContext.fsGroup` | Set Redis® replicas pod's Security Context fsGroup | `1001` | +| `replica.containerSecurityContext.enabled` | Enabled Redis® replicas containers' Security Context | `true` | +| `replica.containerSecurityContext.runAsUser` | Set Redis® replicas containers' Security Context runAsUser | `1001` | +| `replica.schedulerName` | Alternate scheduler for Redis® replicas pods | `""` | +| `replica.updateStrategy.type` | Redis® replicas statefulset strategy type | `RollingUpdate` | +| `replica.priorityClassName` | Redis® replicas pods' priorityClassName | `""` | +| `replica.podManagementPolicy` | podManagementPolicy to manage scaling operation of %%MAIN_CONTAINER_NAME%% pods | `""` | +| `replica.hostAliases` | Redis® replicas pods host aliases | `[]` | +| `replica.podLabels` | Extra labels for Redis® replicas pods | `{}` | +| `replica.podAnnotations` | Annotations for Redis® replicas pods | `{}` | +| `replica.shareProcessNamespace` | Share a single process namespace between all of the containers in Redis® replicas pods | `false` | +| `replica.podAffinityPreset` | Pod affinity preset. Ignored if `replica.affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `replica.podAntiAffinityPreset` | Pod anti-affinity preset. Ignored if `replica.affinity` is set. Allowed values: `soft` or `hard` | `soft` | +| `replica.nodeAffinityPreset.type` | Node affinity preset type. Ignored if `replica.affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `replica.nodeAffinityPreset.key` | Node label key to match. Ignored if `replica.affinity` is set | `""` | +| `replica.nodeAffinityPreset.values` | Node label values to match. 
Ignored if `replica.affinity` is set | `[]` | +| `replica.affinity` | Affinity for Redis® replicas pods assignment | `{}` | +| `replica.nodeSelector` | Node labels for Redis® replicas pods assignment | `{}` | +| `replica.tolerations` | Tolerations for Redis® replicas pods assignment | `[]` | +| `replica.topologySpreadConstraints` | Spread Constraints for Redis® replicas pod assignment | `[]` | +| `replica.dnsPolicy` | DNS Policy for Redis® replica pods | `""` | +| `replica.dnsConfig` | DNS Configuration for Redis® replica pods | `{}` | +| `replica.lifecycleHooks` | for the Redis® replica container(s) to automate configuration before or after startup | `{}` | +| `replica.extraVolumes` | Optionally specify extra list of additional volumes for the Redis® replicas pod(s) | `[]` | +| `replica.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the Redis® replicas container(s) | `[]` | +| `replica.sidecars` | Add additional sidecar containers to the Redis® replicas pod(s) | `[]` | +| `replica.initContainers` | Add additional init containers to the Redis® replicas pod(s) | `[]` | +| `replica.persistence.enabled` | Enable persistence on Redis® replicas nodes using Persistent Volume Claims | `true` | +| `replica.persistence.medium` | Provide a medium for `emptyDir` volumes. | `""` | +| `replica.persistence.sizeLimit` | Set this to enable a size limit for `emptyDir` volumes. | `""` | +| `replica.persistence.path` | The path the volume will be mounted at on Redis® replicas containers | `/data` | +| `replica.persistence.subPath` | The subdirectory of the volume to mount on Redis® replicas containers | `""` | +| `replica.persistence.subPathExpr` | Used to construct the subPath subdirectory of the volume to mount on Redis® replicas containers | `""` | +| `replica.persistence.storageClass` | Persistent Volume storage class | `""` | +| `replica.persistence.accessModes` | Persistent Volume access modes | `["ReadWriteOnce"]` | +| `replica.persistence.size` | Persistent Volume size | `8Gi` | +| `replica.persistence.annotations` | Additional custom annotations for the PVC | `{}` | +| `replica.persistence.selector` | Additional labels to match for the PVC | `{}` | +| `replica.persistence.dataSource` | Custom PVC data source | `{}` | +| `replica.persistence.existingClaim` | Use a existing PVC which must be created manually before bound | `""` | +| `replica.service.type` | Redis® replicas service type | `ClusterIP` | +| `replica.service.ports.redis` | Redis® replicas service port | `6379` | +| `replica.service.nodePorts.redis` | Node port for Redis® replicas | `""` | +| `replica.service.externalTrafficPolicy` | Redis® replicas service external traffic policy | `Cluster` | +| `replica.service.internalTrafficPolicy` | Redis® replicas service internal traffic policy (requires Kubernetes v1.22 or greater to be usable) | `Cluster` | +| `replica.service.extraPorts` | Extra ports to expose (normally used with the `sidecar` value) | `[]` | +| `replica.service.clusterIP` | Redis® replicas service Cluster IP | `""` | +| `replica.service.loadBalancerIP` | Redis® replicas service Load Balancer IP | `""` | +| `replica.service.loadBalancerSourceRanges` | Redis® replicas service Load Balancer sources | `[]` | +| `replica.service.annotations` | Additional custom annotations for Redis® replicas service | `{}` | +| `replica.service.sessionAffinity` | Session Affinity for Kubernetes service, can be "None" or "ClientIP" | `None` | +| `replica.service.sessionAffinityConfig` | Additional settings for the 
sessionAffinity | `{}` | +| `replica.terminationGracePeriodSeconds` | Integer setting the termination grace period for the redis-replicas pods | `30` | +| `replica.autoscaling.enabled` | Enable replica autoscaling settings | `false` | +| `replica.autoscaling.minReplicas` | Minimum replicas for the pod autoscaling | `1` | +| `replica.autoscaling.maxReplicas` | Maximum replicas for the pod autoscaling | `11` | +| `replica.autoscaling.targetCPU` | Percentage of CPU to consider when autoscaling | `""` | +| `replica.autoscaling.targetMemory` | Percentage of Memory to consider when autoscaling | `""` | +| `replica.serviceAccount.create` | Specifies whether a ServiceAccount should be created | `false` | +| `replica.serviceAccount.name` | The name of the ServiceAccount to use. | `""` | +| `replica.serviceAccount.automountServiceAccountToken` | Whether to auto mount the service account token | `true` | +| `replica.serviceAccount.annotations` | Additional custom annotations for the ServiceAccount | `{}` | + + +### Redis® Sentinel configuration parameters + +| Name | Description | Value | +| --------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------ | +| `sentinel.enabled` | Use Redis® Sentinel on Redis® pods. | `false` | +| `sentinel.image.registry` | Redis® Sentinel image registry | `docker.io` | +| `sentinel.image.repository` | Redis® Sentinel image repository | `bitnami/redis-sentinel` | +| `sentinel.image.tag` | Redis® Sentinel image tag (immutable tags are recommended) | `7.0.5-debian-11-r14` | +| `sentinel.image.digest` | Redis® Sentinel image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` | +| `sentinel.image.pullPolicy` | Redis® Sentinel image pull policy | `IfNotPresent` | +| `sentinel.image.pullSecrets` | Redis® Sentinel image pull secrets | `[]` | +| `sentinel.image.debug` | Enable image debug mode | `false` | +| `sentinel.masterSet` | Master set name | `mymaster` | +| `sentinel.quorum` | Sentinel Quorum | `2` | +| `sentinel.getMasterTimeout` | Amount of time to allow before get_sentinel_master_info() times out. | `220` | +| `sentinel.automateClusterRecovery` | Automate cluster recovery in cases where the last replica is not considered a good replica and Sentinel won't automatically failover to it. 
| `false` | +| `sentinel.downAfterMilliseconds` | Timeout for detecting a Redis® node is down | `60000` | +| `sentinel.failoverTimeout` | Timeout for performing a election failover | `180000` | +| `sentinel.parallelSyncs` | Number of replicas that can be reconfigured in parallel to use the new master after a failover | `1` | +| `sentinel.configuration` | Configuration for Redis® Sentinel nodes | `""` | +| `sentinel.command` | Override default container command (useful when using custom images) | `[]` | +| `sentinel.args` | Override default container args (useful when using custom images) | `[]` | +| `sentinel.preExecCmds` | Additional commands to run prior to starting Redis® Sentinel | `[]` | +| `sentinel.extraEnvVars` | Array with extra environment variables to add to Redis® Sentinel nodes | `[]` | +| `sentinel.extraEnvVarsCM` | Name of existing ConfigMap containing extra env vars for Redis® Sentinel nodes | `""` | +| `sentinel.extraEnvVarsSecret` | Name of existing Secret containing extra env vars for Redis® Sentinel nodes | `""` | +| `sentinel.externalMaster.enabled` | Use external master for bootstrapping | `false` | +| `sentinel.externalMaster.host` | External master host to bootstrap from | `""` | +| `sentinel.externalMaster.port` | Port for Redis service external master host | `6379` | +| `sentinel.containerPorts.sentinel` | Container port to open on Redis® Sentinel nodes | `26379` | +| `sentinel.startupProbe.enabled` | Enable startupProbe on Redis® Sentinel nodes | `true` | +| `sentinel.startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `10` | +| `sentinel.startupProbe.periodSeconds` | Period seconds for startupProbe | `10` | +| `sentinel.startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `5` | +| `sentinel.startupProbe.failureThreshold` | Failure threshold for startupProbe | `22` | +| `sentinel.startupProbe.successThreshold` | Success threshold for startupProbe | `1` | +| `sentinel.livenessProbe.enabled` | Enable livenessProbe on Redis® Sentinel nodes | `true` | +| `sentinel.livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `20` | +| `sentinel.livenessProbe.periodSeconds` | Period seconds for livenessProbe | `5` | +| `sentinel.livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `5` | +| `sentinel.livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `5` | +| `sentinel.livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` | +| `sentinel.readinessProbe.enabled` | Enable readinessProbe on Redis® Sentinel nodes | `true` | +| `sentinel.readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `20` | +| `sentinel.readinessProbe.periodSeconds` | Period seconds for readinessProbe | `5` | +| `sentinel.readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `1` | +| `sentinel.readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `5` | +| `sentinel.readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` | +| `sentinel.customStartupProbe` | Custom startupProbe that overrides the default one | `{}` | +| `sentinel.customLivenessProbe` | Custom livenessProbe that overrides the default one | `{}` | +| `sentinel.customReadinessProbe` | Custom readinessProbe that overrides the default one | `{}` | +| `sentinel.persistence.enabled` | Enable persistence on Redis® sentinel nodes using Persistent Volume Claims (Experimental) | `false` | +| `sentinel.persistence.storageClass` | Persistent Volume 
storage class | `""` | +| `sentinel.persistence.accessModes` | Persistent Volume access modes | `["ReadWriteOnce"]` | +| `sentinel.persistence.size` | Persistent Volume size | `100Mi` | +| `sentinel.persistence.annotations` | Additional custom annotations for the PVC | `{}` | +| `sentinel.persistence.selector` | Additional labels to match for the PVC | `{}` | +| `sentinel.persistence.dataSource` | Custom PVC data source | `{}` | +| `sentinel.persistence.medium` | Provide a medium for `emptyDir` volumes. | `""` | +| `sentinel.resources.limits` | The resources limits for the Redis® Sentinel containers | `{}` | +| `sentinel.resources.requests` | The requested resources for the Redis® Sentinel containers | `{}` | +| `sentinel.containerSecurityContext.enabled` | Enabled Redis® Sentinel containers' Security Context | `true` | +| `sentinel.containerSecurityContext.runAsUser` | Set Redis® Sentinel containers' Security Context runAsUser | `1001` | +| `sentinel.lifecycleHooks` | for the Redis® sentinel container(s) to automate configuration before or after startup | `{}` | +| `sentinel.extraVolumes` | Optionally specify extra list of additional volumes for the Redis® Sentinel | `[]` | +| `sentinel.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the Redis® Sentinel container(s) | `[]` | +| `sentinel.service.type` | Redis® Sentinel service type | `ClusterIP` | +| `sentinel.service.ports.redis` | Redis® service port for Redis® | `6379` | +| `sentinel.service.ports.sentinel` | Redis® service port for Redis® Sentinel | `26379` | +| `sentinel.service.nodePorts.redis` | Node port for Redis® | `""` | +| `sentinel.service.nodePorts.sentinel` | Node port for Sentinel | `""` | +| `sentinel.service.externalTrafficPolicy` | Redis® Sentinel service external traffic policy | `Cluster` | +| `sentinel.service.extraPorts` | Extra ports to expose (normally used with the `sidecar` value) | `[]` | +| `sentinel.service.clusterIP` | Redis® Sentinel service Cluster IP | `""` | +| `sentinel.service.loadBalancerIP` | Redis® Sentinel service Load Balancer IP | `""` | +| `sentinel.service.loadBalancerSourceRanges` | Redis® Sentinel service Load Balancer sources | `[]` | +| `sentinel.service.annotations` | Additional custom annotations for Redis® Sentinel service | `{}` | +| `sentinel.service.sessionAffinity` | Session Affinity for Kubernetes service, can be "None" or "ClientIP" | `None` | +| `sentinel.service.sessionAffinityConfig` | Additional settings for the sessionAffinity | `{}` | +| `sentinel.terminationGracePeriodSeconds` | Integer setting the termination grace period for the redis-node pods | `30` | + + +### Other Parameters + +| Name | Description | Value | +| --------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------- | ------- | +| `networkPolicy.enabled` | Enable creation of NetworkPolicy resources | `false` | +| `networkPolicy.allowExternal` | Don't require client label for connections | `true` | +| `networkPolicy.extraIngress` | Add extra ingress rules to the NetworkPolicy | `[]` | +| `networkPolicy.extraEgress` | Add extra egress rules to the NetworkPolicy | `[]` | +| `networkPolicy.ingressNSMatchLabels` | Labels to match to allow traffic from other namespaces | `{}` | +| `networkPolicy.ingressNSPodMatchLabels` | Pod labels to match to allow traffic from other namespaces | `{}` | +| `podSecurityPolicy.create` | Whether to create a PodSecurityPolicy. 
WARNING: PodSecurityPolicy is deprecated in Kubernetes v1.21 or later, unavailable in v1.25 or later | `false` | +| `podSecurityPolicy.enabled` | Enable PodSecurityPolicy's RBAC rules | `false` | +| `rbac.create` | Specifies whether RBAC resources should be created | `false` | +| `rbac.rules` | Custom RBAC rules to set | `[]` | +| `serviceAccount.create` | Specifies whether a ServiceAccount should be created | `true` | +| `serviceAccount.name` | The name of the ServiceAccount to use. | `""` | +| `serviceAccount.automountServiceAccountToken` | Whether to auto mount the service account token | `true` | +| `serviceAccount.annotations` | Additional custom annotations for the ServiceAccount | `{}` | +| `pdb.create` | Specifies whether a PodDisruptionBudget should be created | `false` | +| `pdb.minAvailable` | Min number of pods that must still be available after the eviction | `1` | +| `pdb.maxUnavailable` | Max number of pods that can be unavailable after the eviction | `""` | +| `tls.enabled` | Enable TLS traffic | `false` | +| `tls.authClients` | Require clients to authenticate | `true` | +| `tls.autoGenerated` | Enable autogenerated certificates | `false` | +| `tls.existingSecret` | The name of the existing secret that contains the TLS certificates | `""` | +| `tls.certificatesSecret` | DEPRECATED. Use existingSecret instead. | `""` | +| `tls.certFilename` | Certificate filename | `""` | +| `tls.certKeyFilename` | Certificate Key filename | `""` | +| `tls.certCAFilename` | CA Certificate filename | `""` | +| `tls.dhParamsFilename` | File containing DH params (in order to support DH based ciphers) | `""` | + + +### Metrics Parameters + +| Name | Description | Value | +| -------------------------------------------- | ------------------------------------------------------------------------------------------------------------------- | ------------------------ | +| `metrics.enabled` | Start a sidecar prometheus exporter to expose Redis® metrics | `false` | +| `metrics.image.registry` | Redis® Exporter image registry | `docker.io` | +| `metrics.image.repository` | Redis® Exporter image repository | `bitnami/redis-exporter` | +| `metrics.image.tag` | Redis® Exporter image tag (immutable tags are recommended) | `1.45.0-debian-11-r1` | +| `metrics.image.digest` | Redis® Exporter image digest in the way sha256:aa.... 
Please note this parameter, if set, will override the tag | `""` | +| `metrics.image.pullPolicy` | Redis® Exporter image pull policy | `IfNotPresent` | +| `metrics.image.pullSecrets` | Redis® Exporter image pull secrets | `[]` | +| `metrics.command` | Override default metrics container init command (useful when using custom images) | `[]` | +| `metrics.redisTargetHost` | A way to specify an alternative Redis® hostname | `localhost` | +| `metrics.extraArgs` | Extra arguments for Redis® exporter, for example: | `{}` | +| `metrics.extraEnvVars` | Array with extra environment variables to add to Redis® exporter | `[]` | +| `metrics.containerSecurityContext.enabled` | Enabled Redis® exporter containers' Security Context | `true` | +| `metrics.containerSecurityContext.runAsUser` | Set Redis® exporter containers' Security Context runAsUser | `1001` | +| `metrics.extraVolumes` | Optionally specify extra list of additional volumes for the Redis® metrics sidecar | `[]` | +| `metrics.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the Redis® metrics sidecar | `[]` | +| `metrics.resources.limits` | The resources limits for the Redis® exporter container | `{}` | +| `metrics.resources.requests` | The requested resources for the Redis® exporter container | `{}` | +| `metrics.podLabels` | Extra labels for Redis® exporter pods | `{}` | +| `metrics.podAnnotations` | Annotations for Redis® exporter pods | `{}` | +| `metrics.service.type` | Redis® exporter service type | `ClusterIP` | +| `metrics.service.port` | Redis® exporter service port | `9121` | +| `metrics.service.externalTrafficPolicy` | Redis® exporter service external traffic policy | `Cluster` | +| `metrics.service.extraPorts` | Extra ports to expose (normally used with the `sidecar` value) | `[]` | +| `metrics.service.loadBalancerIP` | Redis® exporter service Load Balancer IP | `""` | +| `metrics.service.loadBalancerSourceRanges` | Redis® exporter service Load Balancer sources | `[]` | +| `metrics.service.annotations` | Additional custom annotations for Redis® exporter service | `{}` | +| `metrics.serviceMonitor.enabled` | Create ServiceMonitor resource(s) for scraping metrics using PrometheusOperator | `false` | +| `metrics.serviceMonitor.namespace` | The namespace in which the ServiceMonitor will be created | `""` | +| `metrics.serviceMonitor.interval` | The interval at which metrics should be scraped | `30s` | +| `metrics.serviceMonitor.scrapeTimeout` | The timeout after which the scrape is ended | `""` | +| `metrics.serviceMonitor.relabellings` | Metrics RelabelConfigs to apply to samples before scraping. | `[]` | +| `metrics.serviceMonitor.metricRelabelings` | Metrics RelabelConfigs to apply to samples before ingestion. 
| `[]` | +| `metrics.serviceMonitor.honorLabels` | Specify honorLabels parameter to add the scrape endpoint | `false` | +| `metrics.serviceMonitor.additionalLabels` | Additional labels that can be used so ServiceMonitor resource(s) can be discovered by Prometheus | `{}` | +| `metrics.serviceMonitor.podTargetLabels` | Labels from the Kubernetes pod to be transferred to the created metrics | `[]` | +| `metrics.prometheusRule.enabled` | Create a custom prometheusRule Resource for scraping metrics using PrometheusOperator | `false` | +| `metrics.prometheusRule.namespace` | The namespace in which the prometheusRule will be created | `""` | +| `metrics.prometheusRule.additionalLabels` | Additional labels for the prometheusRule | `{}` | +| `metrics.prometheusRule.rules` | Custom Prometheus rules | `[]` | + + +### Init Container Parameters + +| Name | Description | Value | +| ------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------- | ----------------------- | +| `volumePermissions.enabled` | Enable init container that changes the owner/group of the PV mount point to `runAsUser:fsGroup` | `false` | +| `volumePermissions.image.registry` | Bitnami Shell image registry | `docker.io` | +| `volumePermissions.image.repository` | Bitnami Shell image repository | `bitnami/bitnami-shell` | +| `volumePermissions.image.tag` | Bitnami Shell image tag (immutable tags are recommended) | `11-debian-11-r48` | +| `volumePermissions.image.digest` | Bitnami Shell image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` | +| `volumePermissions.image.pullPolicy` | Bitnami Shell image pull policy | `IfNotPresent` | +| `volumePermissions.image.pullSecrets` | Bitnami Shell image pull secrets | `[]` | +| `volumePermissions.resources.limits` | The resources limits for the init container | `{}` | +| `volumePermissions.resources.requests` | The requested resources for the init container | `{}` | +| `volumePermissions.containerSecurityContext.runAsUser` | Set init container's Security Context runAsUser | `0` | +| `sysctl.enabled` | Enable init container to modify Kernel settings | `false` | +| `sysctl.image.registry` | Bitnami Shell image registry | `docker.io` | +| `sysctl.image.repository` | Bitnami Shell image repository | `bitnami/bitnami-shell` | +| `sysctl.image.tag` | Bitnami Shell image tag (immutable tags are recommended) | `11-debian-11-r48` | +| `sysctl.image.digest` | Bitnami Shell image digest in the way sha256:aa.... 
Please note this parameter, if set, will override the tag | `""` | +| `sysctl.image.pullPolicy` | Bitnami Shell image pull policy | `IfNotPresent` | +| `sysctl.image.pullSecrets` | Bitnami Shell image pull secrets | `[]` | +| `sysctl.command` | Override default init-sysctl container command (useful when using custom images) | `[]` | +| `sysctl.mountHostSys` | Mount the host `/sys` folder to `/host-sys` | `false` | +| `sysctl.resources.limits` | The resources limits for the init container | `{}` | +| `sysctl.resources.requests` | The requested resources for the init container | `{}` | + + +### useExternalDNS Parameters + +| Name | Description | Value | +| -------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------- | +| `useExternalDNS.enabled` | Enable various syntax that would enable external-dns to work. Note this requires a working installation of `external-dns` to be usable. | `false` | +| `useExternalDNS.additionalAnnotations` | Extra annotations to be utilized when `external-dns` is enabled. | `{}` | +| `useExternalDNS.annotationKey` | The annotation key utilized when `external-dns` is enabled. Setting this to `false` will disable annotations. | `external-dns.alpha.kubernetes.io/` | +| `useExternalDNS.suffix` | The DNS suffix utilized when `external-dns` is enabled. Note that we prepend the suffix with the full name of the release. | `""` | + + +Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example, + +```bash +$ helm install my-release \ + --set auth.password=secretpassword \ + my-repo/redis +``` + +The above command sets the Redis® server password to `secretpassword`. + +> NOTE: Once this chart is deployed, it is not possible to change the application's access credentials, such as usernames or passwords, using Helm. To change these application credentials after deployment, delete any persistent volumes (PVs) used by the chart and re-deploy it, or use the application's built-in administrative tools if available. + +Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example, + +```bash +$ helm install my-release -f values.yaml my-repo/redis +``` + +> **Tip**: You can use the default [values.yaml](values.yaml) + +## Configuration and installation details + +### [Rolling VS Immutable tags](https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/) + +It is strongly recommended to use immutable tags in a production environment. This ensures your deployment does not change automatically if the same tag is updated with a different image. + +Bitnami will release a new chart updating its containers if a new version of the main container, significant changes, or critical vulnerabilities exist. + +### Use a different Redis® version + +To modify the application version used in this chart, specify a different version of the image using the `image.tag` parameter and/or a different repository using the `image.repository` parameter. Refer to the [chart documentation for more information on these parameters and how to use them with images from a private registry](https://docs.bitnami.com/kubernetes/infrastructure/redis/configuration/change-image-version/). 
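+
+For instance, assuming the repository alias `my-repo` used throughout this README, pinning a specific image version could look like the following (the tag shown is simply the chart's current default and is purely illustrative; substitute the immutable tag you have validated):
+
+```bash
+$ helm install my-release \
+  --set image.tag=7.0.5-debian-11-r15 \
+  my-repo/redis
+```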
+
+### Bootstrapping with an External Cluster
+
+This chart can bring online a set of Pods that connect to an existing Redis deployment that lies outside of Kubernetes. This effectively creates a hybrid Redis deployment where both Pods in Kubernetes and instances such as Virtual Machines can partake in a single Redis deployment. This is helpful, for example, when migrating Redis from Virtual Machines into Kubernetes. To take advantage of this, use the following as an example configuration:
+
+```yaml
+replica:
+  externalMaster:
+    enabled: true
+    host: external-redis-0.internal
+sentinel:
+  externalMaster:
+    enabled: true
+    host: external-redis-0.internal
+```
+
+:warning: This is currently limited to clusters in which Sentinel and Redis run on the same node! :warning:
+
+Please also note that the external sentinel must be listening on port `26379`, and this is currently not configurable.
+
+Once the Kubernetes Redis deployment is online and confirmed to be working with the existing cluster, the configuration can then be removed and the cluster will remain connected.
+
+### External DNS
+
+This chart is equipped to allow leveraging the ExternalDNS project. Doing so will enable ExternalDNS to publish the FQDN for each instance, in the format `<pod name>.<release name>.<DNS suffix>`.
+For example, when using the following configuration:
+
+```yaml
+useExternalDNS:
+  enabled: true
+  suffix: prod.example.org
+  additionalAnnotations:
+    ttl: 10
+```
+
+On a cluster where the name of the Helm release is `a`, the hostname of a Pod is generated as: `a-redis-node-0.a-redis.prod.example.org`. The IP of that FQDN will match that of the associated Pod. This modifies the following parameters of the Redis/Sentinel configuration using this new FQDN:
+
+* `replica-announce-ip`
+* `known-sentinel`
+* `known-replica`
+* `announce-ip`
+
+:warning: This requires a working installation of `external-dns` to be fully functional. :warning:
+
+See the [official ExternalDNS documentation](https://github.com/kubernetes-sigs/external-dns) for additional configuration options.
+
+### Cluster topologies
+
+#### Default: Master-Replicas
+
+When installing the chart with `architecture=replication`, it will deploy a Redis® master StatefulSet and a Redis® replicas StatefulSet. The replicas will be read-replicas of the master. Two services will be exposed:
+
+- Redis® Master service: Points to the master, where read-write operations can be performed
+- Redis® Replicas service: Points to the replicas, where only read operations are allowed by default.
+
+In case the master crashes, the replicas will wait until the master node is respawned again by the Kubernetes Controller Manager.
+
+#### Standalone
+
+When installing the chart with `architecture=standalone`, it will deploy a standalone Redis® StatefulSet. A single service will be exposed:
+
+- Redis® Master service: Points to the master, where read-write operations can be performed
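+
+For instance, a standalone deployment could be installed with a command along the following lines (the release name, password, and repository alias follow the examples used elsewhere in this README):
+
+```bash
+$ helm install my-release \
+  --set architecture=standalone \
+  --set auth.password=secretpassword \
+  my-repo/redis
+```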
+
+#### Master-Replicas with Sentinel
+
+When installing the chart with `architecture=replication` and `sentinel.enabled=true`, it will deploy a Redis® master StatefulSet (only one master allowed) and a Redis® replicas StatefulSet. In this case, the pods will contain an extra container with Redis® Sentinel. This container will form a cluster of Redis® Sentinel nodes, which will promote a new master in case the actual one fails. In addition to this, only one service is exposed:
+
+- Redis® service: Exposes port 6379 for Redis® read-only operations and port 26379 for accessing Redis® Sentinel.
+
+For read-only operations, access the service using port 6379. For write operations, it's necessary to access the Redis® Sentinel cluster and query the current master using the command below (using redis-cli or similar):
+
+```
+SENTINEL get-master-addr-by-name <name of your master set>
+```
+
+This command will return the address of the current master, which can be accessed from inside the cluster.
+
+In case the current master crashes, the Sentinel containers will elect a new master node.
+
+`master.count` greater than `1` is not designed for use when `sentinel.enabled=true`.
+
+### Multiple masters (experimental)
+
+When `master.count` is greater than `1`, special care must be taken to create a consistent setup.
+
+An example use case is the creation of a redundant set of standalone masters or master-replicas per Kubernetes node, where you must ensure:
+- No more than `1` master can be deployed per Kubernetes node
+- Replicas and writers can only see the single master of their own Kubernetes node
+
+One way of achieving this is by setting `master.service.internalTrafficPolicy=Local` in combination with a `master.affinity.podAntiAffinity` spec to never schedule more than one master per Kubernetes node.
+
+It's recommended to only change `master.count` if you know what you are doing.
+`master.count` greater than `1` is not designed for use when `sentinel.enabled=true`.
+
+### Using a password file
+
+To use a password file for Redis®, you need to create a secret containing the password and then deploy the chart using that secret.
+
+Refer to the chart documentation for more information on [using a password file for Redis®](https://docs.bitnami.com/kubernetes/infrastructure/redis/administration/use-password-file/).
+
+### Securing traffic using TLS
+
+TLS support can be enabled in the chart by specifying the `tls.` parameters while creating a release. The following parameters should be configured to properly enable the TLS support in the chart:
+
+- `tls.enabled`: Enable TLS support. Defaults to `false`
+- `tls.existingSecret`: Name of the secret that contains the certificates. No defaults.
+- `tls.certFilename`: Certificate filename. No defaults.
+- `tls.certKeyFilename`: Certificate key filename. No defaults.
+- `tls.certCAFilename`: CA Certificate filename. No defaults.
+
+Refer to the chart documentation for more information on [creating the secret and a TLS deployment example](https://docs.bitnami.com/kubernetes/infrastructure/redis/administration/enable-tls/).
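+
+As a reference, a minimal values snippet enabling TLS could look like the following (the secret name is hypothetical, and the filenames must match the keys inside that secret):
+
+```yaml
+tls:
+  enabled: true
+  authClients: true
+  existingSecret: redis-tls        # hypothetical secret holding the files below
+  certFilename: tls.crt
+  certKeyFilename: tls.key
+  certCAFilename: ca.crt
+```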
+
+### Metrics
+
+The chart can optionally start a metrics exporter for [prometheus](https://prometheus.io). The metrics endpoint (port 9121) is exposed in the service. Metrics can be scraped from within the cluster using something similar to what is described in the [example Prometheus scrape configuration](https://github.com/prometheus/prometheus/blob/master/documentation/examples/prometheus-kubernetes.yml). If metrics are to be scraped from outside the cluster, the Kubernetes API proxy can be utilized to access the endpoint.
+
+If you have enabled TLS by specifying `tls.enabled=true`, you also need to pass the TLS options to the metrics exporter. You can do that via `metrics.extraArgs`. You can find the metrics exporter CLI flags for TLS [here](https://github.com/oliver006/redis_exporter#command-line-flags). For example, you can either set `metrics.extraArgs.skip-tls-verification=true` to skip TLS verification, or provide the following values under `metrics.extraArgs` for TLS client authentication:
+
+```console
+tls-client-key-file
+tls-client-cert-file
+tls-ca-cert-file
+```
+
+### Host Kernel Settings
+
+Redis® may require some changes in the kernel of the host machine to work as expected, in particular increasing the `somaxconn` value and disabling transparent huge pages.
+
+Refer to the chart documentation for more information on [configuring host kernel settings with an example](https://docs.bitnami.com/kubernetes/infrastructure/redis/administration/configure-kernel-settings/).
+
+## Persistence
+
+By default, the chart mounts a [Persistent Volume](https://kubernetes.io/docs/concepts/storage/persistent-volumes/) at the `/data` path. The volume is created using dynamic volume provisioning. If a Persistent Volume Claim already exists, specify it during installation.
+
+### Existing PersistentVolumeClaim
+
+1. Create the PersistentVolume
+2. Create the PersistentVolumeClaim
+3. Install the chart
+
+```bash
+$ helm install my-release --set master.persistence.existingClaim=PVC_NAME my-repo/redis
+```
+
+## Backup and restore
+
+Refer to the chart documentation for more information on [backing up and restoring Redis® deployments](https://docs.bitnami.com/kubernetes/infrastructure/redis/administration/backup-restore/).
+
+## NetworkPolicy
+
+To enable network policy for Redis®, install [a networking plugin that implements the Kubernetes NetworkPolicy spec](https://kubernetes.io/docs/tasks/administer-cluster/declare-network-policy#before-you-begin), and set `networkPolicy.enabled` to `true`.
+
+Refer to the chart documentation for more information on [enabling the network policy in Redis® deployments](https://docs.bitnami.com/kubernetes/infrastructure/redis/administration/enable-network-policy/).
+
+### Setting Pod's affinity
+
+This chart allows you to set your custom affinity using the `XXX.affinity` parameter(s). Find more information about Pod's affinity in the [Kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity).
+
+As an alternative, you can use one of the preset configurations for pod affinity, pod anti-affinity, and node affinity available in the [bitnami/common](https://github.com/bitnami/charts/tree/main/bitnami/common#affinities) chart. To do so, set the `XXX.podAffinityPreset`, `XXX.podAntiAffinityPreset`, or `XXX.nodeAffinityPreset` parameters.
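+
+For instance, a values sketch using these presets to spread masters and replicas across nodes could look like the following (the node label and value in the node affinity preset are purely illustrative):
+
+```yaml
+master:
+  podAntiAffinityPreset: hard
+replica:
+  podAntiAffinityPreset: hard
+  nodeAffinityPreset:
+    type: soft
+    key: topology.kubernetes.io/zone   # illustrative node label
+    values:
+      - us-east-1a
+```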
+
+## Troubleshooting
+
+Find more information about how to deal with common errors related to Bitnami's Helm charts in [this troubleshooting guide](https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues).
+
+## Upgrading
+
+A major chart version change (like v1.2.3 -> v2.0.0) indicates that there is an incompatible breaking change needing manual actions.
+
+### To 17.0.0
+
+This major version updates the Redis® docker image version used from `6.2` to `7.0`, the new stable version. There are no major changes in the chart, but we recommend checking the [Redis® 7.0 release notes](https://raw.githubusercontent.com/redis/redis/7.0/00-RELEASENOTES) before upgrading.
+
+### To 16.0.0
+
+This major release renames several values in this chart and adds missing features, in order to be in line with the rest of the assets in the Bitnami charts repository.
+
+Affected values:
+- `master.service.port` renamed as `master.service.ports.redis`.
+- `master.service.nodePort` renamed as `master.service.nodePorts.redis`.
+- `replica.service.port` renamed as `replica.service.ports.redis`.
+- `replica.service.nodePort` renamed as `replica.service.nodePorts.redis`.
+- `sentinel.service.port` renamed as `sentinel.service.ports.redis`.
+- `sentinel.service.sentinelPort` renamed as `sentinel.service.ports.sentinel`.
+- `master.containerPort` renamed as `master.containerPorts.redis`.
+- `replica.containerPort` renamed as `replica.containerPorts.redis`.
+- `sentinel.containerPort` renamed as `sentinel.containerPorts.sentinel`.
+- `master.spreadConstraints` renamed as `master.topologySpreadConstraints`.
+- `replica.spreadConstraints` renamed as `replica.topologySpreadConstraints`.
+
+### To 15.0.0
+
+The parameter to enable the usage of StaticIDs was removed. The behavior is to [always use StaticIDs](https://github.com/bitnami/charts/pull/7278).
+
+### To 14.8.0
+
+The Redis® sentinel exporter was removed in this version because the upstream project was deprecated. The regular Redis® exporter is included in the sentinel scenario as usual.
+
+### To 14.0.0
+
+- Several parameters were renamed or disappeared in favor of new ones in this major version:
+  - The term *slave* has been replaced by the term *replica*. Therefore, parameters prefixed with `slave` are now prefixed with `replicas`.
+  - Credentials parameters are reorganized under the `auth` parameter.
+  - `cluster.enabled` parameter is deprecated in favor of `architecture` parameter that accepts two values: `standalone` and `replication`.
+  - `securityContext.*` is deprecated in favor of `XXX.podSecurityContext` and `XXX.containerSecurityContext`.
+  - `sentinel.metrics.*` parameters are deprecated in favor of `metrics.sentinel.*` ones.
+- New parameters to add custom command, environment variables, sidecars, init containers, etc. were added.
+- Chart labels were adapted to follow the [Helm charts standard labels](https://helm.sh/docs/chart_best_practices/labels/#standard-labels).
+- values.yaml metadata was adapted to follow the format supported by [Readme Generator for Helm](https://github.com/bitnami-labs/readme-generator-for-helm).
+
+Consequences:
+
+Backwards compatibility is not guaranteed. To upgrade to `14.0.0`, install a new release of the Redis® chart, and migrate the data from your previous release. You have two alternatives to do so:
+
+- Create a backup of the database, and restore it on the new release as explained in the [Backup and restore](#backup-and-restore) section.
+- Reuse the PVC used to hold the master data on your previous release. To do so, use the `master.persistence.existingClaim` parameter. The following example assumes that the release name is `redis`:
+
+```bash
+$ helm install redis my-repo/redis --set auth.password=[PASSWORD] --set master.persistence.existingClaim=[EXISTING_PVC]
+```
+
+> Note: you need to substitute the placeholder _[EXISTING_PVC]_ with the name of the PVC used on your previous release, and _[PASSWORD]_ with the password used in your previous release.
+
+### To 13.0.0
+
+This major version updates the Redis® docker image version used from `6.0` to `6.2`, the new stable version. There are no major changes in the chart and there shouldn't be any breaking changes in it as `6.2` is basically a stricter superset of `6.0`. For more information, please refer to [Redis® 6.2 release notes](https://raw.githubusercontent.com/redis/redis/6.2/00-RELEASENOTES).
+
+### To 12.3.0
+
+This version also introduces `bitnami/common`, a [library chart](https://helm.sh/docs/topics/library_charts/#helm), as a dependency. More documentation about this new utility can be found [here](https://github.com/bitnami/charts/tree/main/bitnami/common#bitnami-common-library-chart). Please make sure that you have updated the chart dependencies before executing any upgrade.
+
+### To 12.0.0
+
+[On November 13, 2020, Helm v2 support was formally finished](https://github.com/helm/charts#status-of-the-project). This major version is the result of the required changes applied to the Helm Chart to be able to incorporate the different features added in Helm v3 and to be consistent with the Helm project itself regarding the Helm v2 EOL.
+
+**What changes were introduced in this major version?**
+
+- Previous versions of this Helm Chart used `apiVersion: v1` (installable by both Helm 2 and 3); this Helm Chart was updated to `apiVersion: v2` (installable by Helm 3 only). [Here](https://helm.sh/docs/topics/charts/#the-apiversion-field) you can find more information about the `apiVersion` field.
+- The different fields present in the *Chart.yaml* file have been ordered alphabetically in a homogeneous way for all the Bitnami Helm Charts.
+
+**Considerations when upgrading to this version**
+
+- If you want to upgrade to this version from a previous one installed with Helm v3, you shouldn't face any issues.
+- If you want to upgrade to this version using Helm v2, this scenario is not supported as this version doesn't support Helm v2 anymore.
+- If you installed the previous version with Helm v2 and want to upgrade to this version with Helm v3, please refer to the [official Helm documentation](https://helm.sh/docs/topics/v2_v3_migration/#migration-use-cases) about migrating from Helm v2 to v3.
+
+**Useful links**
+
+- https://docs.bitnami.com/tutorials/resolve-helm2-helm3-post-migration-issues/
+- https://helm.sh/docs/topics/v2_v3_migration/
+- https://helm.sh/blog/migrate-from-helm-v2-to-helm-v3/
+
+### To 11.0.0
+
+When deployed with sentinel enabled, only a group of nodes is deployed and the master/slave role is handled within the group. To avoid breaking compatibility, the settings for these nodes are given through the `slave.xxxx` parameters in `values.yaml`.
+
+### To 9.0.0
+
+The metrics exporter has been changed from a separate deployment to a sidecar container, due to the latest changes in the Redis® exporter code. Check the [official page](https://github.com/oliver006/redis_exporter/) for more information. The metrics container image was changed from oliver006/redis_exporter to bitnami/redis-exporter (Bitnami's maintained package of oliver006/redis_exporter).
+
+### To 7.0.0
+
+In order to improve the performance in case of slave failure, we added persistence to the read-only slaves. That means that we moved from Deployment to StatefulSets. This should not affect upgrades from previous versions of the chart, as the deployments did not contain any persistence at all.
+
+This version also allows enabling Redis® Sentinel containers inside of the Redis® Pods (feature disabled by default). In case the master crashes, a new Redis® node will be elected as master. In order to query the current master (no redis master service is exposed), you need to query the Sentinel cluster first.
+
+### To 11.0.0
+
+When using sentinel, a new statefulset called `-node` was introduced. This will break upgrading from a previous version where the statefulsets are called master and slave. Hence the PVC will not match the new naming and won't be reused. If you want to keep your data, you will need to perform a backup and then restore the data in this new version.
+
+### To 10.0.0
+
+For releases with `usePassword: true`, the value `sentinel.usePassword` controls whether the password authentication also applies to the sentinel port. This defaults to `true` for a secure configuration; however, it is possible to disable it to account for the following cases:
+
+- Using a version of redis-sentinel prior to `5.0.1` where the authentication feature was introduced.
+- Where redis clients need to be updated to support sentinel authentication.
+
+If using a master/slave topology, or with `usePassword: false`, no action is required.
+
+### To 8.0.18
+
+For releases with `metrics.enabled: true`, the default tag for the exporter image is now `v1.x.x`. This introduces many changes including metrics names. You'll want to use [this dashboard](https://github.com/oliver006/redis_exporter/blob/master/contrib/grafana_prometheus_redis_dashboard.json) now. Please see the [redis_exporter github page](https://github.com/oliver006/redis_exporter#upgrading-from-0x-to-1x) for more details.
+
+### To 7.0.0
+
+This version causes a change in the Redis® Master StatefulSet definition, so the command `helm upgrade` would not work out of the box. As an alternative, one of the following could be done:
+
+- Recommended: Create a clone of the Redis® Master PVC (for example, using projects like [this one](https://github.com/edseymour/pvc-transfer)). Then launch a fresh release reusing this cloned PVC.
+
+  ```
+  helm install my-release my-repo/redis --set persistence.existingClaim=<cloned-pvc-name>
+  ```
+
+- Alternative (not recommended, do at your own risk): `helm delete --purge` does not remove the PVC assigned to the Redis® Master StatefulSet. As a consequence, the following commands can be run to upgrade the release:
+
+  ```
+  helm delete --purge <previous-release>
+  helm install <new-release> my-repo/redis
+  ```
+
+Previous versions of the chart were not using persistence in the slaves, so this upgrade would add it to them. Another important change is that no values are inherited from master to slaves. For example, in 6.0.0 `slaves.readinessProbe.periodSeconds`, if empty, would be set to `master.readinessProbe.periodSeconds`. This approach lacked transparency and was difficult to maintain. From now on, all the slave parameters must be configured just as is done with the masters.
+
+Some values have changed as well:
+
+- `master.port` and `slave.port` have been changed to `redisPort` (same value for both master and slaves)
+- `master.securityContext` and `slave.securityContext` have been changed to `securityContext` (same values for both master and slaves)
+
+By default, the upgrade will not change the cluster topology. In case you want to use Redis® Sentinel, you must explicitly set `sentinel.enabled` to `true`.
+
+### To 6.0.0
+
+Previous versions of the chart were using an init-container to change the permissions of the volumes. This was done in case the `securityContext` directive in the template was not enough for that (for example, with cephFS). In this new version of the chart, this container is disabled by default (which should not affect most of the deployments). If your installation still requires that init container, execute `helm upgrade` with `--set volumePermissions.enabled=true`.
+
+### To 5.0.0
+
+The default image in this release may be switched out for any image containing the `redis-server`
+and `redis-cli` binaries. 
+
+#### Breaking changes
+
+- `master.args` and `slave.args` are removed. Use `master.command` or `slave.command` instead in order to override the image entrypoint, or `master.extraFlags` to pass additional flags to `redis-server`.
+- `disableCommands` is now interpreted as an array of strings instead of a string of comma separated values.
+- `master.persistence.path` now defaults to `/data`.
+
+### To 4.0.0
+
+This version removes the `chart` label from the `spec.selector.matchLabels` which is immutable since `StatefulSet apps/v1beta2`. It has been inadvertently added, causing any subsequent upgrade to fail. See https://github.com/helm/charts/issues/7726.
+
+It also fixes https://github.com/helm/charts/issues/7726 where a deployment `extensions/v1beta1` cannot be upgraded if `spec.selector` is not explicitly set.
+
+Finally, it fixes https://github.com/helm/charts/issues/7803 by removing mutable labels in `spec.VolumeClaimTemplate.metadata.labels` so that it is upgradable.
+
+In order to upgrade, delete the Redis® StatefulSet before upgrading:
+
+```bash
+kubectl delete statefulsets.apps --cascade=false my-release-redis-master
+```
+
+And edit the Redis® slave (and metrics if enabled) deployment:
+
+```bash
+kubectl patch deployments my-release-redis-slave --type=json -p='[{"op": "remove", "path": "/spec/selector/matchLabels/chart"}]'
+kubectl patch deployments my-release-redis-metrics --type=json -p='[{"op": "remove", "path": "/spec/selector/matchLabels/chart"}]'
+```
+
+## License
+
+Copyright © 2022 Bitnami
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
\ No newline at end of file
diff --git a/charts/modules/redis/charts/common/.helmignore b/charts/modules/redis/charts/common/.helmignore
new file mode 100644
index 0000000..50af031
--- /dev/null
+++ b/charts/modules/redis/charts/common/.helmignore
@@ -0,0 +1,22 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/charts/modules/redis/charts/common/Chart.yaml b/charts/modules/redis/charts/common/Chart.yaml
new file mode 100644
index 0000000..6f0c3a6
--- /dev/null
+++ b/charts/modules/redis/charts/common/Chart.yaml
@@ -0,0 +1,23 @@
+annotations:
+  category: Infrastructure
+apiVersion: v2
+appVersion: 2.1.2
+description: A Library Helm Chart for grouping common logic between bitnami charts.
+  This chart is not deployable by itself.
+home: https://github.com/bitnami/charts/tree/main/bitnami/common
+icon: https://bitnami.com/downloads/logos/bitnami-mark.png
+keywords:
+- common
+- helper
+- template
+- function
+- bitnami
+maintainers:
+- name: Bitnami
+  url: https://github.com/bitnami/charts
+name: common
+sources:
+- https://github.com/bitnami/charts
+- https://www.bitnami.com/
+type: library
+version: 2.1.2
diff --git a/charts/modules/redis/charts/common/README.md b/charts/modules/redis/charts/common/README.md
new file mode 100644
index 0000000..a2ecd60
--- /dev/null
+++ b/charts/modules/redis/charts/common/README.md
@@ -0,0 +1,350 @@
+# Bitnami Common Library Chart
+
+A [Helm Library Chart](https://helm.sh/docs/topics/library_charts/#helm) for grouping common logic between bitnami charts.
+
+## TL;DR
+
+```yaml
+dependencies:
+  - name: common
+    version: 1.x.x
+    repository: https://charts.bitnami.com/bitnami
+```
+
+```bash
+$ helm dependency update
+```
+
+```yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: {{ include "common.names.fullname" . }}
+data:
+  myvalue: "Hello World"
+```
+
+## Introduction
+
+This chart provides common template helpers that can be used to develop new charts using the [Helm](https://helm.sh) package manager.
+
+Bitnami charts can be used with [Kubeapps](https://kubeapps.dev/) for deployment and management of Helm Charts in clusters.
+
+## Prerequisites
+
+- Kubernetes 1.19+
+- Helm 3.2.0+
+
+## Parameters
+
+The following tables list the helpers available in the library, which are scoped in different sections.
+
+### Affinities
+
+| Helper identifier              | Description                                          | Expected Input                                 |
+|--------------------------------|------------------------------------------------------|------------------------------------------------|
+| `common.affinities.nodes.soft` | Return a soft nodeAffinity definition                | `dict "key" "FOO" "values" (list "BAR" "BAZ")` |
+| `common.affinities.nodes.hard` | Return a hard nodeAffinity definition                | `dict "key" "FOO" "values" (list "BAR" "BAZ")` |
+| `common.affinities.pods.soft`  | Return a soft podAffinity/podAntiAffinity definition | `dict "component" "FOO" "context" $`           |
+| `common.affinities.pods.hard`  | Return a hard podAffinity/podAntiAffinity definition | `dict "component" "FOO" "context" $`           |
+
+### Capabilities
+
+| Helper identifier                               | Description                                                                                      | Expected Input    |
+|-------------------------------------------------|--------------------------------------------------------------------------------------------------|-------------------|
+| `common.capabilities.kubeVersion`               | Return the target Kubernetes version (using client default if .Values.kubeVersion is not set).  | `.` Chart context |
+| `common.capabilities.cronjob.apiVersion`        | Return the appropriate apiVersion for cronjob.                                                   | `.` Chart context |
+| `common.capabilities.deployment.apiVersion`     | Return the appropriate apiVersion for deployment.                                                | `.` Chart context |
+| `common.capabilities.statefulset.apiVersion`    | Return the appropriate apiVersion for statefulset.                                               | `.` Chart context |
+| `common.capabilities.ingress.apiVersion`        | Return the appropriate apiVersion for ingress.                                                   | `.` Chart context |
+| `common.capabilities.rbac.apiVersion`           | Return the appropriate apiVersion for RBAC resources.                                            | `.` Chart context |
+| `common.capabilities.crd.apiVersion`            | Return the appropriate apiVersion for CRDs.                                                      | `.` Chart context |
+| `common.capabilities.policy.apiVersion`         | Return the appropriate apiVersion for podsecuritypolicy. 
| `.` Chart context | +| `common.capabilities.networkPolicy.apiVersion` | Return the appropriate apiVersion for networkpolicy. | `.` Chart context | +| `common.capabilities.apiService.apiVersion` | Return the appropriate apiVersion for APIService. | `.` Chart context | +| `common.capabilities.hpa.apiVersion` | Return the appropriate apiVersion for Horizontal Pod Autoscaler | `.` Chart context | +| `common.capabilities.supportsHelmVersion` | Returns true if the used Helm version is 3.3+ | `.` Chart context | + +### Errors + +| Helper identifier | Description | Expected Input | +|-----------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------| +| `common.errors.upgrade.passwords.empty` | It will ensure required passwords are given when we are upgrading a chart. If `validationErrors` is not empty it will throw an error and will stop the upgrade action. | `dict "validationErrors" (list $validationError00 $validationError01) "context" $` | + +### Images + +| Helper identifier | Description | Expected Input | +|-----------------------------|------------------------------------------------------|---------------------------------------------------------------------------------------------------------| +| `common.images.image` | Return the proper and full image name | `dict "imageRoot" .Values.path.to.the.image "global" $`, see [ImageRoot](#imageroot) for the structure. | +| `common.images.pullSecrets` | Return the proper Docker Image Registry Secret Names (deprecated: use common.images.renderPullSecrets instead) | `dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "global" .Values.global` | +| `common.images.renderPullSecrets` | Return the proper Docker Image Registry Secret Names (evaluates values as templates) | `dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "context" $` | + +### Ingress + +| Helper identifier | Description | Expected Input | +|-------------------------------------------|-------------------------------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.ingress.backend` | Generate a proper Ingress backend entry depending on the API version | `dict "serviceName" "foo" "servicePort" "bar"`, see the [Ingress deprecation notice](https://kubernetes.io/blog/2019/07/18/api-deprecations-in-1-16/) for the syntax differences | +| `common.ingress.supportsPathType` | Prints "true" if the pathType field is supported | `.` Chart context | +| `common.ingress.supportsIngressClassname` | Prints "true" if the ingressClassname field is supported | `.` Chart context | +| `common.ingress.certManagerRequest` | Prints "true" if required cert-manager annotations for TLS signed certificates are set in the Ingress annotations | `dict "annotations" .Values.path.to.the.ingress.annotations` | + +### Labels + +| Helper identifier | Description | Expected Input | +|-----------------------------|-----------------------------------------------------------------------------|-------------------| +| `common.labels.standard` | Return Kubernetes standard labels | `.` Chart context | +| `common.labels.matchLabels` 
| Labels to use on `deploy.spec.selector.matchLabels` and `svc.spec.selector` | `.` Chart context |
+
+### Names
+
+| Helper identifier                 | Description                                                            | Expected Input    |
+|-----------------------------------|------------------------------------------------------------------------|-------------------|
+| `common.names.name`               | Expand the name of the chart or use `.Values.nameOverride`             | `.` Chart context |
+| `common.names.fullname`           | Create a default fully qualified app name.                             | `.` Chart context |
+| `common.names.namespace`          | Allow the release namespace to be overridden                           | `.` Chart context |
+| `common.names.fullname.namespace` | Create a fully qualified app name adding the installation's namespace  | `.` Chart context |
+| `common.names.chart`              | Chart name plus version                                                | `.` Chart context |
+
+### Secrets
+
+| Helper identifier                 | Description                                                   | Expected Input |
+|-----------------------------------|---------------------------------------------------------------|----------------|
+| `common.secrets.name`             | Generate the name of the secret.                              | `dict "existingSecret" .Values.path.to.the.existingSecret "defaultNameSuffix" "mySuffix" "context" $` see [ExistingSecret](#existingsecret) for the structure. |
+| `common.secrets.key`              | Generate secret key.                                          | `dict "existingSecret" .Values.path.to.the.existingSecret "key" "keyName"` see [ExistingSecret](#existingsecret) for the structure. |
+| `common.secrets.passwords.manage` | Generate secret password or retrieve one if already created.  | `dict "secret" "secret-name" "key" "keyName" "providedValues" (list "path.to.password1" "path.to.password2") "length" 10 "strong" false "chartName" "chartName" "context" $`; the length, strong and chartName fields are optional. |
+| `common.secrets.exists`           | Returns whether a previously generated secret already exists. | `dict "secret" "secret-name" "context" $` |
+
+### Storage
+
+| Helper identifier      | Description                      | Expected Input |
+|------------------------|----------------------------------|----------------|
+| `common.storage.class` | Return the proper Storage Class  | `dict "persistence" .Values.path.to.the.persistence "global" $`, see [Persistence](#persistence) for the structure. |
+
+### TplValues
+
+| Helper identifier         | Description                              | Expected Input |
+|---------------------------|------------------------------------------|----------------|
+| `common.tplvalues.render` | Renders a value that contains a template | `dict "value" .Values.path.to.the.Value "context" $`; value is the value that should be rendered as a template, and context is usually the chart context `$` or `.` |
+
+### Utils
+
+| Helper identifier              | Description                                                                                | Expected Input |
+|--------------------------------|---------------------------------------------------------------------------------------------|----------------|
+| `common.utils.fieldToEnvVar`   | Build environment variable name given a field.                                             | `dict "field" "my-password"` |
+| `common.utils.secret.getvalue` | Print instructions to get a secret value. 
| `dict "secret" "secret-name" "field" "secret-value-field" "context" $` | +| `common.utils.getValueFromKey` | Gets a value from `.Values` object given its key path | `dict "key" "path.to.key" "context" $` | +| `common.utils.getKeyFromList` | Returns first `.Values` key with a defined value or first of the list if all non-defined | `dict "keys" (list "path.to.key1" "path.to.key2") "context" $` | + +### Validations + +| Helper identifier | Description | Expected Input | +|--------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.validations.values.single.empty` | Validate a value must not be empty. | `dict "valueKey" "path.to.value" "secret" "secret.name" "field" "my-password" "subchart" "subchart" "context" $` secret, field and subchart are optional. In case they are given, the helper will generate a how to get instruction. See [ValidateValue](#validatevalue) | +| `common.validations.values.multiple.empty` | Validate a multiple values must not be empty. It returns a shared error for all the values. | `dict "required" (list $validateValueConf00 $validateValueConf01) "context" $`. See [ValidateValue](#validatevalue) | +| `common.validations.values.mariadb.passwords` | This helper will ensure required password for MariaDB are not empty. It returns a shared error for all the values. | `dict "secret" "mariadb-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use mariadb chart and the helper. | +| `common.validations.values.mysql.passwords` | This helper will ensure required password for MySQL are not empty. It returns a shared error for all the values. | `dict "secret" "mysql-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use mysql chart and the helper. | +| `common.validations.values.postgresql.passwords` | This helper will ensure required password for PostgreSQL are not empty. It returns a shared error for all the values. | `dict "secret" "postgresql-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use postgresql chart and the helper. | +| `common.validations.values.redis.passwords` | This helper will ensure required password for Redis® are not empty. It returns a shared error for all the values. | `dict "secret" "redis-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use redis chart and the helper. | +| `common.validations.values.cassandra.passwords` | This helper will ensure required password for Cassandra are not empty. It returns a shared error for all the values. | `dict "secret" "cassandra-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use cassandra chart and the helper. | +| `common.validations.values.mongodb.passwords` | This helper will ensure required password for MongoDB® are not empty. It returns a shared error for all the values. 
| `dict "secret" "mongodb-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use mongodb chart and the helper. | + +### Warnings + +| Helper identifier | Description | Expected Input | +|------------------------------|----------------------------------|------------------------------------------------------------| +| `common.warnings.rollingTag` | Warning about using rolling tag. | `ImageRoot` see [ImageRoot](#imageroot) for the structure. | + +## Special input schemas + +### ImageRoot + +```yaml +registry: + type: string + description: Docker registry where the image is located + example: docker.io + +repository: + type: string + description: Repository and image name + example: bitnami/nginx + +tag: + type: string + description: image tag + example: 1.16.1-debian-10-r63 + +pullPolicy: + type: string + description: Specify a imagePullPolicy. Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + +pullSecrets: + type: array + items: + type: string + description: Optionally specify an array of imagePullSecrets (evaluated as templates). + +debug: + type: boolean + description: Set to true if you would like to see extra information on logs + example: false + +## An instance would be: +# registry: docker.io +# repository: bitnami/nginx +# tag: 1.16.1-debian-10-r63 +# pullPolicy: IfNotPresent +# debug: false +``` + +### Persistence + +```yaml +enabled: + type: boolean + description: Whether enable persistence. + example: true + +storageClass: + type: string + description: Ghost data Persistent Volume Storage Class, If set to "-", storageClassName: "" which disables dynamic provisioning. + example: "-" + +accessMode: + type: string + description: Access mode for the Persistent Volume Storage. + example: ReadWriteOnce + +size: + type: string + description: Size the Persistent Volume Storage. + example: 8Gi + +path: + type: string + description: Path to be persisted. + example: /bitnami + +## An instance would be: +# enabled: true +# storageClass: "-" +# accessMode: ReadWriteOnce +# size: 8Gi +# path: /bitnami +``` + +### ExistingSecret + +```yaml +name: + type: string + description: Name of the existing secret. + example: mySecret +keyMapping: + description: Mapping between the expected key name and the name of the key in the existing secret. + type: object + +## An instance would be: +# name: mySecret +# keyMapping: +# password: myPasswordKey +``` + +#### Example of use + +When we store sensitive data for a deployment in a secret, some times we want to give to users the possibility of using theirs existing secrets. + +```yaml +# templates/secret.yaml +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "common.names.fullname" . }} + labels: + app: {{ include "common.names.fullname" . }} +type: Opaque +data: + password: {{ .Values.password | b64enc | quote }} + +# templates/dpl.yaml +--- +... + env: + - name: PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "common.secrets.name" (dict "existingSecret" .Values.existingSecret "context" $) }} + key: {{ include "common.secrets.key" (dict "existingSecret" .Values.existingSecret "key" "password") }} +... 
+
+# values.yaml
+---
+name: mySecret
+keyMapping:
+  password: myPasswordKey
+```
+
+### ValidateValue
+
+#### NOTES.txt
+
+```console
+{{- $validateValueConf00 := (dict "valueKey" "path.to.value00" "secret" "secretName" "field" "password-00") -}}
+{{- $validateValueConf01 := (dict "valueKey" "path.to.value01" "secret" "secretName" "field" "password-01") -}}
+
+{{ include "common.validations.values.multiple.empty" (dict "required" (list $validateValueConf00 $validateValueConf01) "context" $) }}
+```
+
+If we force those values to be empty, we will see some alerts:
+
+```console
+$ helm install test mychart --set path.to.value00="",path.to.value01=""
+    'path.to.value00' must not be empty, please add '--set path.to.value00=$PASSWORD_00' to the command. To get the current value:
+
+        export PASSWORD_00=$(kubectl get secret --namespace default secretName -o jsonpath="{.data.password-00}" | base64 -d)
+
+    'path.to.value01' must not be empty, please add '--set path.to.value01=$PASSWORD_01' to the command. To get the current value:
+
+        export PASSWORD_01=$(kubectl get secret --namespace default secretName -o jsonpath="{.data.password-01}" | base64 -d)
+```
+
+## Upgrading
+
+### To 1.0.0
+
+[On November 13, 2020, Helm v2 support was formally finished](https://github.com/helm/charts#status-of-the-project). This major version is the result of the required changes applied to the Helm Chart to be able to incorporate the different features added in Helm v3 and to be consistent with the Helm project itself regarding the Helm v2 EOL.
+
+**What changes were introduced in this major version?**
+
+- Previous versions of this Helm Chart use `apiVersion: v1` (installable by both Helm 2 and 3); this Helm Chart was updated to `apiVersion: v2` (installable by Helm 3 only). [Here](https://helm.sh/docs/topics/charts/#the-apiversion-field) you can find more information about the `apiVersion` field.
+- Use `type: library`. [Here](https://v3.helm.sh/docs/faq/#library-chart-support) you can find more information.
+- The different fields present in the *Chart.yaml* file have been ordered alphabetically in a homogeneous way for all the Bitnami Helm Charts
+
+**Considerations when upgrading to this version**
+
+- If you want to upgrade to this version from a previous one installed with Helm v3, you shouldn't face any issues
+- If you want to upgrade to this version using Helm v2, this scenario is not supported as this version doesn't support Helm v2 anymore
+- If you installed the previous version with Helm v2 and want to upgrade to this version with Helm v3, please refer to the [official Helm documentation](https://helm.sh/docs/topics/v2_v3_migration/#migration-use-cases) about migrating from Helm v2 to v3
+
+**Useful links**
+
+- https://docs.bitnami.com/tutorials/resolve-helm2-helm3-post-migration-issues/
+- https://helm.sh/docs/topics/v2_v3_migration/
+- https://helm.sh/blog/migrate-from-helm-v2-to-helm-v3/
+
+## License
+
+Copyright © 2022 Bitnami
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. 
diff --git a/charts/modules/redis/charts/common/templates/_affinities.tpl b/charts/modules/redis/charts/common/templates/_affinities.tpl new file mode 100644 index 0000000..497068f --- /dev/null +++ b/charts/modules/redis/charts/common/templates/_affinities.tpl @@ -0,0 +1,98 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Return a soft nodeAffinity definition +{{ include "common.affinities.nodes.soft" (dict "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.nodes.soft" -}} +preferredDuringSchedulingIgnoredDuringExecution: + - preference: + matchExpressions: + - key: {{ .key }} + operator: In + values: + {{- range .values }} + - {{ . | quote }} + {{- end }} + weight: 1 +{{- end -}} + +{{/* +Return a hard nodeAffinity definition +{{ include "common.affinities.nodes.hard" (dict "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.nodes.hard" -}} +requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: {{ .key }} + operator: In + values: + {{- range .values }} + - {{ . | quote }} + {{- end }} +{{- end -}} + +{{/* +Return a nodeAffinity definition +{{ include "common.affinities.nodes" (dict "type" "soft" "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.nodes" -}} + {{- if eq .type "soft" }} + {{- include "common.affinities.nodes.soft" . -}} + {{- else if eq .type "hard" }} + {{- include "common.affinities.nodes.hard" . -}} + {{- end -}} +{{- end -}} + +{{/* +Return a soft podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods.soft" (dict "component" "FOO" "extraMatchLabels" .Values.extraMatchLabels "context" $) -}} +*/}} +{{- define "common.affinities.pods.soft" -}} +{{- $component := default "" .component -}} +{{- $extraMatchLabels := default (dict) .extraMatchLabels -}} +preferredDuringSchedulingIgnoredDuringExecution: + - podAffinityTerm: + labelSelector: + matchLabels: {{- (include "common.labels.matchLabels" .context) | nindent 10 }} + {{- if not (empty $component) }} + {{ printf "app.kubernetes.io/component: %s" $component }} + {{- end }} + {{- range $key, $value := $extraMatchLabels }} + {{ $key }}: {{ $value | quote }} + {{- end }} + topologyKey: kubernetes.io/hostname + weight: 1 +{{- end -}} + +{{/* +Return a hard podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods.hard" (dict "component" "FOO" "extraMatchLabels" .Values.extraMatchLabels "context" $) -}} +*/}} +{{- define "common.affinities.pods.hard" -}} +{{- $component := default "" .component -}} +{{- $extraMatchLabels := default (dict) .extraMatchLabels -}} +requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: {{- (include "common.labels.matchLabels" .context) | nindent 8 }} + {{- if not (empty $component) }} + {{ printf "app.kubernetes.io/component: %s" $component }} + {{- end }} + {{- range $key, $value := $extraMatchLabels }} + {{ $key }}: {{ $value | quote }} + {{- end }} + topologyKey: kubernetes.io/hostname +{{- end -}} + +{{/* +Return a podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods" (dict "type" "soft" "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.pods" -}} + {{- if eq .type "soft" }} + {{- include "common.affinities.pods.soft" . -}} + {{- else if eq .type "hard" }} + {{- include "common.affinities.pods.hard" . 
-}} + {{- end -}} +{{- end -}} diff --git a/charts/modules/redis/charts/common/templates/_capabilities.tpl b/charts/modules/redis/charts/common/templates/_capabilities.tpl new file mode 100644 index 0000000..9d9b760 --- /dev/null +++ b/charts/modules/redis/charts/common/templates/_capabilities.tpl @@ -0,0 +1,154 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Return the target Kubernetes version +*/}} +{{- define "common.capabilities.kubeVersion" -}} +{{- if .Values.global }} + {{- if .Values.global.kubeVersion }} + {{- .Values.global.kubeVersion -}} + {{- else }} + {{- default .Capabilities.KubeVersion.Version .Values.kubeVersion -}} + {{- end -}} +{{- else }} +{{- default .Capabilities.KubeVersion.Version .Values.kubeVersion -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for poddisruptionbudget. +*/}} +{{- define "common.capabilities.policy.apiVersion" -}} +{{- if semverCompare "<1.21-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "policy/v1beta1" -}} +{{- else -}} +{{- print "policy/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for networkpolicy. +*/}} +{{- define "common.capabilities.networkPolicy.apiVersion" -}} +{{- if semverCompare "<1.7-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for cronjob. +*/}} +{{- define "common.capabilities.cronjob.apiVersion" -}} +{{- if semverCompare "<1.21-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "batch/v1beta1" -}} +{{- else -}} +{{- print "batch/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for deployment. +*/}} +{{- define "common.capabilities.deployment.apiVersion" -}} +{{- if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for statefulset. +*/}} +{{- define "common.capabilities.statefulset.apiVersion" -}} +{{- if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "apps/v1beta1" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for ingress. +*/}} +{{- define "common.capabilities.ingress.apiVersion" -}} +{{- if .Values.ingress -}} +{{- if .Values.ingress.apiVersion -}} +{{- .Values.ingress.apiVersion -}} +{{- else if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "networking.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end }} +{{- else if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "networking.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for RBAC resources. +*/}} +{{- define "common.capabilities.rbac.apiVersion" -}} +{{- if semverCompare "<1.17-0" (include "common.capabilities.kubeVersion" .) 
-}} +{{- print "rbac.authorization.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "rbac.authorization.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for CRDs. +*/}} +{{- define "common.capabilities.crd.apiVersion" -}} +{{- if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "apiextensions.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "apiextensions.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for APIService. +*/}} +{{- define "common.capabilities.apiService.apiVersion" -}} +{{- if semverCompare "<1.10-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "apiregistration.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "apiregistration.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for Horizontal Pod Autoscaler. +*/}} +{{- define "common.capabilities.hpa.apiVersion" -}} +{{- if semverCompare "<1.23-0" (include "common.capabilities.kubeVersion" .context) -}} +{{- if .beta2 -}} +{{- print "autoscaling/v2beta2" -}} +{{- else -}} +{{- print "autoscaling/v2beta1" -}} +{{- end -}} +{{- else -}} +{{- print "autoscaling/v2" -}} +{{- end -}} +{{- end -}} + +{{/* +Returns true if the used Helm version is 3.3+. +A way to check the used Helm version was not introduced until version 3.3.0 with .Capabilities.HelmVersion, which contains an additional "{}}" structure. +This check is introduced as a regexMatch instead of {{ if .Capabilities.HelmVersion }} because checking for the key HelmVersion in <3.3 results in a "interface not found" error. +**To be removed when the catalog's minimun Helm version is 3.3** +*/}} +{{- define "common.capabilities.supportsHelmVersion" -}} +{{- if regexMatch "{(v[0-9])*[^}]*}}$" (.Capabilities | toString ) }} + {{- true -}} +{{- end -}} +{{- end -}} diff --git a/charts/modules/redis/charts/common/templates/_errors.tpl b/charts/modules/redis/charts/common/templates/_errors.tpl new file mode 100644 index 0000000..a79cc2e --- /dev/null +++ b/charts/modules/redis/charts/common/templates/_errors.tpl @@ -0,0 +1,23 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Through error when upgrading using empty passwords values that must not be empty. + +Usage: +{{- $validationError00 := include "common.validations.values.single.empty" (dict "valueKey" "path.to.password00" "secret" "secretName" "field" "password-00") -}} +{{- $validationError01 := include "common.validations.values.single.empty" (dict "valueKey" "path.to.password01" "secret" "secretName" "field" "password-01") -}} +{{ include "common.errors.upgrade.passwords.empty" (dict "validationErrors" (list $validationError00 $validationError01) "context" $) }} + +Required password params: + - validationErrors - String - Required. List of validation strings to be return, if it is empty it won't throw error. + - context - Context - Required. Parent context. +*/}} +{{- define "common.errors.upgrade.passwords.empty" -}} + {{- $validationErrors := join "" .validationErrors -}} + {{- if and $validationErrors .context.Release.IsUpgrade -}} + {{- $errorString := "\nPASSWORDS ERROR: You must provide your current passwords when upgrading the release." -}} + {{- $errorString = print $errorString "\n Note that even after reinstallation, old credentials may be needed as they may be kept in persistent volume claims." 
-}} + {{- $errorString = print $errorString "\n Further information can be obtained at https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues/#credential-errors-while-upgrading-chart-releases" -}} + {{- $errorString = print $errorString "\n%s" -}} + {{- printf $errorString $validationErrors | fail -}} + {{- end -}} +{{- end -}} diff --git a/charts/modules/redis/charts/common/templates/_images.tpl b/charts/modules/redis/charts/common/templates/_images.tpl new file mode 100644 index 0000000..46c659e --- /dev/null +++ b/charts/modules/redis/charts/common/templates/_images.tpl @@ -0,0 +1,76 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Return the proper image name +{{ include "common.images.image" ( dict "imageRoot" .Values.path.to.the.image "global" $) }} +*/}} +{{- define "common.images.image" -}} +{{- $registryName := .imageRoot.registry -}} +{{- $repositoryName := .imageRoot.repository -}} +{{- $separator := ":" -}} +{{- $termination := .imageRoot.tag | toString -}} +{{- if .global }} + {{- if .global.imageRegistry }} + {{- $registryName = .global.imageRegistry -}} + {{- end -}} +{{- end -}} +{{- if .imageRoot.digest }} + {{- $separator = "@" -}} + {{- $termination = .imageRoot.digest | toString -}} +{{- end -}} +{{- printf "%s/%s%s%s" $registryName $repositoryName $separator $termination -}} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names (deprecated: use common.images.renderPullSecrets instead) +{{ include "common.images.pullSecrets" ( dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "global" .Values.global) }} +*/}} +{{- define "common.images.pullSecrets" -}} + {{- $pullSecrets := list }} + + {{- if .global }} + {{- range .global.imagePullSecrets -}} + {{- $pullSecrets = append $pullSecrets . -}} + {{- end -}} + {{- end -}} + + {{- range .images -}} + {{- range .pullSecrets -}} + {{- $pullSecrets = append $pullSecrets . -}} + {{- end -}} + {{- end -}} + + {{- if (not (empty $pullSecrets)) }} +imagePullSecrets: + {{- range $pullSecrets }} + - name: {{ . }} + {{- end }} + {{- end }} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names evaluating values as templates +{{ include "common.images.renderPullSecrets" ( dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "context" $) }} +*/}} +{{- define "common.images.renderPullSecrets" -}} + {{- $pullSecrets := list }} + {{- $context := .context }} + + {{- if $context.Values.global }} + {{- range $context.Values.global.imagePullSecrets -}} + {{- $pullSecrets = append $pullSecrets (include "common.tplvalues.render" (dict "value" . "context" $context)) -}} + {{- end -}} + {{- end -}} + + {{- range .images -}} + {{- range .pullSecrets -}} + {{- $pullSecrets = append $pullSecrets (include "common.tplvalues.render" (dict "value" . "context" $context)) -}} + {{- end -}} + {{- end -}} + + {{- if (not (empty $pullSecrets)) }} +imagePullSecrets: + {{- range $pullSecrets }} + - name: {{ . }} + {{- end }} + {{- end }} +{{- end -}} diff --git a/charts/modules/redis/charts/common/templates/_ingress.tpl b/charts/modules/redis/charts/common/templates/_ingress.tpl new file mode 100644 index 0000000..831da9c --- /dev/null +++ b/charts/modules/redis/charts/common/templates/_ingress.tpl @@ -0,0 +1,68 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Generate backend entry that is compatible with all Kubernetes API versions. 
+ +Usage: +{{ include "common.ingress.backend" (dict "serviceName" "backendName" "servicePort" "backendPort" "context" $) }} + +Params: + - serviceName - String. Name of an existing service backend + - servicePort - String/Int. Port name (or number) of the service. It will be translated to different yaml depending if it is a string or an integer. + - context - Dict - Required. The context for the template evaluation. +*/}} +{{- define "common.ingress.backend" -}} +{{- $apiVersion := (include "common.capabilities.ingress.apiVersion" .context) -}} +{{- if or (eq $apiVersion "extensions/v1beta1") (eq $apiVersion "networking.k8s.io/v1beta1") -}} +serviceName: {{ .serviceName }} +servicePort: {{ .servicePort }} +{{- else -}} +service: + name: {{ .serviceName }} + port: + {{- if typeIs "string" .servicePort }} + name: {{ .servicePort }} + {{- else if or (typeIs "int" .servicePort) (typeIs "float64" .servicePort) }} + number: {{ .servicePort | int }} + {{- end }} +{{- end -}} +{{- end -}} + +{{/* +Print "true" if the API pathType field is supported +Usage: +{{ include "common.ingress.supportsPathType" . }} +*/}} +{{- define "common.ingress.supportsPathType" -}} +{{- if (semverCompare "<1.18-0" (include "common.capabilities.kubeVersion" .)) -}} +{{- print "false" -}} +{{- else -}} +{{- print "true" -}} +{{- end -}} +{{- end -}} + +{{/* +Returns true if the ingressClassname field is supported +Usage: +{{ include "common.ingress.supportsIngressClassname" . }} +*/}} +{{- define "common.ingress.supportsIngressClassname" -}} +{{- if semverCompare "<1.18-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "false" -}} +{{- else -}} +{{- print "true" -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if cert-manager required annotations for TLS signed +certificates are set in the Ingress annotations +Ref: https://cert-manager.io/docs/usage/ingress/#supported-annotations +Usage: +{{ include "common.ingress.certManagerRequest" ( dict "annotations" .Values.path.to.the.ingress.annotations ) }} +*/}} +{{- define "common.ingress.certManagerRequest" -}} +{{ if or (hasKey .annotations "cert-manager.io/cluster-issuer") (hasKey .annotations "cert-manager.io/issuer") (hasKey .annotations "kubernetes.io/tls-acme") }} + {{- true -}} +{{- end -}} +{{- end -}} diff --git a/charts/modules/redis/charts/common/templates/_labels.tpl b/charts/modules/redis/charts/common/templates/_labels.tpl new file mode 100644 index 0000000..252066c --- /dev/null +++ b/charts/modules/redis/charts/common/templates/_labels.tpl @@ -0,0 +1,18 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Kubernetes standard labels +*/}} +{{- define "common.labels.standard" -}} +app.kubernetes.io/name: {{ include "common.names.name" . }} +helm.sh/chart: {{ include "common.names.chart" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end -}} + +{{/* +Labels to use on deploy.spec.selector.matchLabels and svc.spec.selector +*/}} +{{- define "common.labels.matchLabels" -}} +app.kubernetes.io/name: {{ include "common.names.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end -}} diff --git a/charts/modules/redis/charts/common/templates/_names.tpl b/charts/modules/redis/charts/common/templates/_names.tpl new file mode 100644 index 0000000..1bdac8b --- /dev/null +++ b/charts/modules/redis/charts/common/templates/_names.tpl @@ -0,0 +1,70 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. 
+*/}} +{{- define "common.names.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "common.names.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "common.names.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create a default fully qualified dependency name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +Usage: +{{ include "common.names.dependency.fullname" (dict "chartName" "dependency-chart-name" "chartValues" .Values.dependency-chart "context" $) }} +*/}} +{{- define "common.names.dependency.fullname" -}} +{{- if .chartValues.fullnameOverride -}} +{{- .chartValues.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .chartName .chartValues.nameOverride -}} +{{- if contains $name .context.Release.Name -}} +{{- .context.Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .context.Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Allow the release namespace to be overridden for multi-namespace deployments in combined charts. +*/}} +{{- define "common.names.namespace" -}} +{{- if .Values.namespaceOverride -}} +{{- .Values.namespaceOverride -}} +{{- else -}} +{{- .Release.Namespace -}} +{{- end -}} +{{- end -}} + +{{/* +Create a fully qualified app name adding the installation's namespace. +*/}} +{{- define "common.names.fullname.namespace" -}} +{{- printf "%s-%s" (include "common.names.fullname" .) (include "common.names.namespace" .) | trunc 63 | trimSuffix "-" -}} +{{- end -}} diff --git a/charts/modules/redis/charts/common/templates/_secrets.tpl b/charts/modules/redis/charts/common/templates/_secrets.tpl new file mode 100644 index 0000000..4267d42 --- /dev/null +++ b/charts/modules/redis/charts/common/templates/_secrets.tpl @@ -0,0 +1,165 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Generate secret name. + +Usage: +{{ include "common.secrets.name" (dict "existingSecret" .Values.path.to.the.existingSecret "defaultNameSuffix" "mySuffix" "context" $) }} + +Params: + - existingSecret - ExistingSecret/String - Optional. The path to the existing secrets in the values.yaml given by the user + to be used instead of the default one. Allows for it to be of type String (just the secret name) for backwards compatibility. + +info: https://github.com/bitnami/charts/tree/main/bitnami/common#existingsecret + - defaultNameSuffix - String - Optional. It is used only if we have several secrets in the same deployment. + - context - Dict - Required. The context for the template evaluation. 
+*/}} +{{- define "common.secrets.name" -}} +{{- $name := (include "common.names.fullname" .context) -}} + +{{- if .defaultNameSuffix -}} +{{- $name = printf "%s-%s" $name .defaultNameSuffix | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- with .existingSecret -}} +{{- if not (typeIs "string" .) -}} +{{- with .name -}} +{{- $name = . -}} +{{- end -}} +{{- else -}} +{{- $name = . -}} +{{- end -}} +{{- end -}} + +{{- printf "%s" $name -}} +{{- end -}} + +{{/* +Generate secret key. + +Usage: +{{ include "common.secrets.key" (dict "existingSecret" .Values.path.to.the.existingSecret "key" "keyName") }} + +Params: + - existingSecret - ExistingSecret/String - Optional. The path to the existing secrets in the values.yaml given by the user + to be used instead of the default one. Allows for it to be of type String (just the secret name) for backwards compatibility. + +info: https://github.com/bitnami/charts/tree/main/bitnami/common#existingsecret + - key - String - Required. Name of the key in the secret. +*/}} +{{- define "common.secrets.key" -}} +{{- $key := .key -}} + +{{- if .existingSecret -}} + {{- if not (typeIs "string" .existingSecret) -}} + {{- if .existingSecret.keyMapping -}} + {{- $key = index .existingSecret.keyMapping $.key -}} + {{- end -}} + {{- end }} +{{- end -}} + +{{- printf "%s" $key -}} +{{- end -}} + +{{/* +Generate secret password or retrieve one if already created. + +Usage: +{{ include "common.secrets.passwords.manage" (dict "secret" "secret-name" "key" "keyName" "providedValues" (list "path.to.password1" "path.to.password2") "length" 10 "strong" false "chartName" "chartName" "context" $) }} + +Params: + - secret - String - Required - Name of the 'Secret' resource where the password is stored. + - key - String - Required - Name of the key in the secret. + - providedValues - List - Required - The path to the validating value in the values.yaml, e.g: "mysql.password". Will pick first parameter with a defined value. + - length - int - Optional - Length of the generated random password. + - strong - Boolean - Optional - Whether to add symbols to the generated random password. + - chartName - String - Optional - Name of the chart used when said chart is deployed as a subchart. + - context - Context - Required - Parent context. + +The order in which this function returns a secret password: + 1. Already existing 'Secret' resource + (If a 'Secret' resource is found under the name provided to the 'secret' parameter to this function and that 'Secret' resource contains a key with the name passed as the 'key' parameter to this function then the value of this existing secret password will be returned) + 2. Password provided via the values.yaml + (If one of the keys passed to the 'providedValues' parameter to this function is a valid path to a key in the values.yaml and has a value, the value of the first key with a value will be returned) + 3. 
Randomly generated secret password + (A new random secret password with the length specified in the 'length' parameter will be generated and returned) + +*/}} +{{- define "common.secrets.passwords.manage" -}} + +{{- $password := "" }} +{{- $subchart := "" }} +{{- $chartName := default "" .chartName }} +{{- $passwordLength := default 10 .length }} +{{- $providedPasswordKey := include "common.utils.getKeyFromList" (dict "keys" .providedValues "context" $.context) }} +{{- $providedPasswordValue := include "common.utils.getValueFromKey" (dict "key" $providedPasswordKey "context" $.context) }} +{{- $secretData := (lookup "v1" "Secret" $.context.Release.Namespace .secret).data }} +{{- if $secretData }} + {{- if hasKey $secretData .key }} + {{- $password = index $secretData .key | quote }} + {{- else }} + {{- printf "\nPASSWORDS ERROR: The secret \"%s\" does not contain the key \"%s\"\n" .secret .key | fail -}} + {{- end -}} +{{- else if $providedPasswordValue }} + {{- $password = $providedPasswordValue | toString | b64enc | quote }} +{{- else }} + + {{- if .context.Values.enabled }} + {{- $subchart = $chartName }} + {{- end -}} + + {{- $requiredPassword := dict "valueKey" $providedPasswordKey "secret" .secret "field" .key "subchart" $subchart "context" $.context -}} + {{- $requiredPasswordError := include "common.validations.values.single.empty" $requiredPassword -}} + {{- $passwordValidationErrors := list $requiredPasswordError -}} + {{- include "common.errors.upgrade.passwords.empty" (dict "validationErrors" $passwordValidationErrors "context" $.context) -}} + + {{- if .strong }} + {{- $subStr := list (lower (randAlpha 1)) (randNumeric 1) (upper (randAlpha 1)) | join "_" }} + {{- $password = randAscii $passwordLength }} + {{- $password = regexReplaceAllLiteral "\\W" $password "@" | substr 5 $passwordLength }} + {{- $password = printf "%s%s" $subStr $password | toString | shuffle | b64enc | quote }} + {{- else }} + {{- $password = randAlphaNum $passwordLength | b64enc | quote }} + {{- end }} +{{- end -}} +{{- printf "%s" $password -}} +{{- end -}} + +{{/* +Reuses the value from an existing secret, otherwise sets its value to a default value. + +Usage: +{{ include "common.secrets.lookup" (dict "secret" "secret-name" "key" "keyName" "defaultValue" .Values.myValue "context" $) }} + +Params: + - secret - String - Required - Name of the 'Secret' resource where the password is stored. + - key - String - Required - Name of the key in the secret. + - defaultValue - String - Required - The path to the validating value in the values.yaml, e.g: "mysql.password". Will pick first parameter with a defined value. + - context - Context - Required - Parent context. + +*/}} +{{- define "common.secrets.lookup" -}} +{{- $value := "" -}} +{{- $defaultValue := required "\n'common.secrets.lookup': Argument 'defaultValue' missing or empty" .defaultValue -}} +{{- $secretData := (lookup "v1" "Secret" $.context.Release.Namespace .secret).data -}} +{{- if and $secretData (hasKey $secretData .key) -}} + {{- $value = index $secretData .key -}} +{{- else -}} + {{- $value = $defaultValue | toString | b64enc -}} +{{- end -}} +{{- printf "%s" $value -}} +{{- end -}} + +{{/* +Returns whether a previous generated secret already exists + +Usage: +{{ include "common.secrets.exists" (dict "secret" "secret-name" "context" $) }} + +Params: + - secret - String - Required - Name of the 'Secret' resource where the password is stored. + - context - Context - Required - Parent context. 
+*/}} +{{- define "common.secrets.exists" -}} +{{- $secret := (lookup "v1" "Secret" $.context.Release.Namespace .secret) }} +{{- if $secret }} + {{- true -}} +{{- end -}} +{{- end -}} diff --git a/charts/modules/redis/charts/common/templates/_storage.tpl b/charts/modules/redis/charts/common/templates/_storage.tpl new file mode 100644 index 0000000..60e2a84 --- /dev/null +++ b/charts/modules/redis/charts/common/templates/_storage.tpl @@ -0,0 +1,23 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Return the proper Storage Class +{{ include "common.storage.class" ( dict "persistence" .Values.path.to.the.persistence "global" $) }} +*/}} +{{- define "common.storage.class" -}} + +{{- $storageClass := .persistence.storageClass -}} +{{- if .global -}} + {{- if .global.storageClass -}} + {{- $storageClass = .global.storageClass -}} + {{- end -}} +{{- end -}} + +{{- if $storageClass -}} + {{- if (eq "-" $storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" $storageClass -}} + {{- end -}} +{{- end -}} + +{{- end -}} diff --git a/charts/modules/redis/charts/common/templates/_tplvalues.tpl b/charts/modules/redis/charts/common/templates/_tplvalues.tpl new file mode 100644 index 0000000..2db1668 --- /dev/null +++ b/charts/modules/redis/charts/common/templates/_tplvalues.tpl @@ -0,0 +1,13 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Renders a value that contains template. +Usage: +{{ include "common.tplvalues.render" ( dict "value" .Values.path.to.the.Value "context" $) }} +*/}} +{{- define "common.tplvalues.render" -}} + {{- if typeIs "string" .value }} + {{- tpl .value .context }} + {{- else }} + {{- tpl (.value | toYaml) .context }} + {{- end }} +{{- end -}} diff --git a/charts/modules/redis/charts/common/templates/_utils.tpl b/charts/modules/redis/charts/common/templates/_utils.tpl new file mode 100644 index 0000000..8c22b2a --- /dev/null +++ b/charts/modules/redis/charts/common/templates/_utils.tpl @@ -0,0 +1,62 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Print instructions to get a secret value. +Usage: +{{ include "common.utils.secret.getvalue" (dict "secret" "secret-name" "field" "secret-value-field" "context" $) }} +*/}} +{{- define "common.utils.secret.getvalue" -}} +{{- $varname := include "common.utils.fieldToEnvVar" . -}} +export {{ $varname }}=$(kubectl get secret --namespace {{ .context.Release.Namespace | quote }} {{ .secret }} -o jsonpath="{.data.{{ .field }}}" | base64 -d) +{{- end -}} + +{{/* +Build env var name given a field +Usage: +{{ include "common.utils.fieldToEnvVar" dict "field" "my-password" }} +*/}} +{{- define "common.utils.fieldToEnvVar" -}} + {{- $fieldNameSplit := splitList "-" .field -}} + {{- $upperCaseFieldNameSplit := list -}} + + {{- range $fieldNameSplit -}} + {{- $upperCaseFieldNameSplit = append $upperCaseFieldNameSplit ( upper . ) -}} + {{- end -}} + + {{ join "_" $upperCaseFieldNameSplit }} +{{- end -}} + +{{/* +Gets a value from .Values given +Usage: +{{ include "common.utils.getValueFromKey" (dict "key" "path.to.key" "context" $) }} +*/}} +{{- define "common.utils.getValueFromKey" -}} +{{- $splitKey := splitList "." .key -}} +{{- $value := "" -}} +{{- $latestObj := $.context.Values -}} +{{- range $splitKey -}} + {{- if not $latestObj -}} + {{- printf "please review the entire path of '%s' exists in values" $.key | fail -}} + {{- end -}} + {{- $value = ( index $latestObj . 
) -}} + {{- $latestObj = $value -}} +{{- end -}} +{{- printf "%v" (default "" $value) -}} +{{- end -}} + +{{/* +Returns first .Values key with a defined value or first of the list if all non-defined +Usage: +{{ include "common.utils.getKeyFromList" (dict "keys" (list "path.to.key1" "path.to.key2") "context" $) }} +*/}} +{{- define "common.utils.getKeyFromList" -}} +{{- $key := first .keys -}} +{{- $reverseKeys := reverse .keys }} +{{- range $reverseKeys }} + {{- $value := include "common.utils.getValueFromKey" (dict "key" . "context" $.context ) }} + {{- if $value -}} + {{- $key = . }} + {{- end -}} +{{- end -}} +{{- printf "%s" $key -}} +{{- end -}} diff --git a/charts/modules/redis/charts/common/templates/_warnings.tpl b/charts/modules/redis/charts/common/templates/_warnings.tpl new file mode 100644 index 0000000..ae10fa4 --- /dev/null +++ b/charts/modules/redis/charts/common/templates/_warnings.tpl @@ -0,0 +1,14 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Warning about using rolling tag. +Usage: +{{ include "common.warnings.rollingTag" .Values.path.to.the.imageRoot }} +*/}} +{{- define "common.warnings.rollingTag" -}} + +{{- if and (contains "bitnami/" .repository) (not (.tag | toString | regexFind "-r\\d+$|sha256:")) }} +WARNING: Rolling tag detected ({{ .repository }}:{{ .tag }}), please note that it is strongly recommended to avoid using rolling tags in a production environment. ++info https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/ +{{- end }} + +{{- end -}} diff --git a/charts/modules/redis/charts/common/templates/validations/_cassandra.tpl b/charts/modules/redis/charts/common/templates/validations/_cassandra.tpl new file mode 100644 index 0000000..ded1ae3 --- /dev/null +++ b/charts/modules/redis/charts/common/templates/validations/_cassandra.tpl @@ -0,0 +1,72 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate Cassandra required passwords are not empty. + +Usage: +{{ include "common.validations.values.cassandra.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where Cassandra values are stored, e.g: "cassandra-passwords-secret" + - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.cassandra.passwords" -}} + {{- $existingSecret := include "common.cassandra.values.existingSecret" . -}} + {{- $enabled := include "common.cassandra.values.enabled" . -}} + {{- $dbUserPrefix := include "common.cassandra.values.key.dbUser" . -}} + {{- $valueKeyPassword := printf "%s.password" $dbUserPrefix -}} + + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "cassandra-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.cassandra.values.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. 
Default: false +*/}} +{{- define "common.cassandra.values.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.cassandra.dbUser.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.dbUser.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled cassandra. + +Usage: +{{ include "common.cassandra.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.cassandra.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.cassandra.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key dbUser + +Usage: +{{ include "common.cassandra.values.key.dbUser" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false +*/}} +{{- define "common.cassandra.values.key.dbUser" -}} + {{- if .subchart -}} + cassandra.dbUser + {{- else -}} + dbUser + {{- end -}} +{{- end -}} diff --git a/charts/modules/redis/charts/common/templates/validations/_mariadb.tpl b/charts/modules/redis/charts/common/templates/validations/_mariadb.tpl new file mode 100644 index 0000000..b6906ff --- /dev/null +++ b/charts/modules/redis/charts/common/templates/validations/_mariadb.tpl @@ -0,0 +1,103 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate MariaDB required passwords are not empty. + +Usage: +{{ include "common.validations.values.mariadb.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where MariaDB values are stored, e.g: "mysql-passwords-secret" + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.mariadb.passwords" -}} + {{- $existingSecret := include "common.mariadb.values.auth.existingSecret" . -}} + {{- $enabled := include "common.mariadb.values.enabled" . -}} + {{- $architecture := include "common.mariadb.values.architecture" . -}} + {{- $authPrefix := include "common.mariadb.values.key.auth" . 
-}} + {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}} + {{- $valueKeyUsername := printf "%s.username" $authPrefix -}} + {{- $valueKeyPassword := printf "%s.password" $authPrefix -}} + {{- $valueKeyReplicationPassword := printf "%s.replicationPassword" $authPrefix -}} + + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mariadb-root-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}} + + {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }} + {{- if not (empty $valueUsername) -}} + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mariadb-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + {{- end -}} + + {{- if (eq $architecture "replication") -}} + {{- $requiredReplicationPassword := dict "valueKey" $valueKeyReplicationPassword "secret" .secret "field" "mariadb-replication-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredReplicationPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.mariadb.values.auth.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.mariadb.values.auth.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.mariadb.auth.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.auth.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled mariadb. + +Usage: +{{ include "common.mariadb.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.mariadb.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.mariadb.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for architecture + +Usage: +{{ include "common.mariadb.values.architecture" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.mariadb.values.architecture" -}} + {{- if .subchart -}} + {{- .context.Values.mariadb.architecture -}} + {{- else -}} + {{- .context.Values.architecture -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key auth + +Usage: +{{ include "common.mariadb.values.key.auth" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. 
Default: false +*/}} +{{- define "common.mariadb.values.key.auth" -}} + {{- if .subchart -}} + mariadb.auth + {{- else -}} + auth + {{- end -}} +{{- end -}} diff --git a/charts/modules/redis/charts/common/templates/validations/_mongodb.tpl b/charts/modules/redis/charts/common/templates/validations/_mongodb.tpl new file mode 100644 index 0000000..f820ec1 --- /dev/null +++ b/charts/modules/redis/charts/common/templates/validations/_mongodb.tpl @@ -0,0 +1,108 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate MongoDB® required passwords are not empty. + +Usage: +{{ include "common.validations.values.mongodb.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where MongoDB® values are stored, e.g: "mongodb-passwords-secret" + - subchart - Boolean - Optional. Whether MongoDB® is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.mongodb.passwords" -}} + {{- $existingSecret := include "common.mongodb.values.auth.existingSecret" . -}} + {{- $enabled := include "common.mongodb.values.enabled" . -}} + {{- $authPrefix := include "common.mongodb.values.key.auth" . -}} + {{- $architecture := include "common.mongodb.values.architecture" . -}} + {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}} + {{- $valueKeyUsername := printf "%s.username" $authPrefix -}} + {{- $valueKeyDatabase := printf "%s.database" $authPrefix -}} + {{- $valueKeyPassword := printf "%s.password" $authPrefix -}} + {{- $valueKeyReplicaSetKey := printf "%s.replicaSetKey" $authPrefix -}} + {{- $valueKeyAuthEnabled := printf "%s.enabled" $authPrefix -}} + + {{- $authEnabled := include "common.utils.getValueFromKey" (dict "key" $valueKeyAuthEnabled "context" .context) -}} + + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") (eq $authEnabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mongodb-root-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}} + + {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }} + {{- $valueDatabase := include "common.utils.getValueFromKey" (dict "key" $valueKeyDatabase "context" .context) }} + {{- if and $valueUsername $valueDatabase -}} + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mongodb-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + {{- end -}} + + {{- if (eq $architecture "replicaset") -}} + {{- $requiredReplicaSetKey := dict "valueKey" $valueKeyReplicaSetKey "secret" .secret "field" "mongodb-replica-set-key" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredReplicaSetKey -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.mongodb.values.auth.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MongoDb is used as subchart or not. 
Default: false +*/}} +{{- define "common.mongodb.values.auth.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.mongodb.auth.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.auth.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled mongodb. + +Usage: +{{ include "common.mongodb.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.mongodb.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.mongodb.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key auth + +Usage: +{{ include "common.mongodb.values.key.auth" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MongoDB® is used as subchart or not. Default: false +*/}} +{{- define "common.mongodb.values.key.auth" -}} + {{- if .subchart -}} + mongodb.auth + {{- else -}} + auth + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for architecture + +Usage: +{{ include "common.mongodb.values.architecture" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MongoDB® is used as subchart or not. Default: false +*/}} +{{- define "common.mongodb.values.architecture" -}} + {{- if .subchart -}} + {{- .context.Values.mongodb.architecture -}} + {{- else -}} + {{- .context.Values.architecture -}} + {{- end -}} +{{- end -}} diff --git a/charts/modules/redis/charts/common/templates/validations/_mysql.tpl b/charts/modules/redis/charts/common/templates/validations/_mysql.tpl new file mode 100644 index 0000000..74472a0 --- /dev/null +++ b/charts/modules/redis/charts/common/templates/validations/_mysql.tpl @@ -0,0 +1,103 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate MySQL required passwords are not empty. + +Usage: +{{ include "common.validations.values.mysql.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where MySQL values are stored, e.g: "mysql-passwords-secret" + - subchart - Boolean - Optional. Whether MySQL is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.mysql.passwords" -}} + {{- $existingSecret := include "common.mysql.values.auth.existingSecret" . -}} + {{- $enabled := include "common.mysql.values.enabled" . -}} + {{- $architecture := include "common.mysql.values.architecture" . -}} + {{- $authPrefix := include "common.mysql.values.key.auth" . 
-}} + {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}} + {{- $valueKeyUsername := printf "%s.username" $authPrefix -}} + {{- $valueKeyPassword := printf "%s.password" $authPrefix -}} + {{- $valueKeyReplicationPassword := printf "%s.replicationPassword" $authPrefix -}} + + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mysql-root-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}} + + {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }} + {{- if not (empty $valueUsername) -}} + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mysql-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + {{- end -}} + + {{- if (eq $architecture "replication") -}} + {{- $requiredReplicationPassword := dict "valueKey" $valueKeyReplicationPassword "secret" .secret "field" "mysql-replication-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredReplicationPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.mysql.values.auth.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MySQL is used as subchart or not. Default: false +*/}} +{{- define "common.mysql.values.auth.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.mysql.auth.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.auth.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled mysql. + +Usage: +{{ include "common.mysql.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.mysql.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.mysql.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for architecture + +Usage: +{{ include "common.mysql.values.architecture" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MySQL is used as subchart or not. Default: false +*/}} +{{- define "common.mysql.values.architecture" -}} + {{- if .subchart -}} + {{- .context.Values.mysql.architecture -}} + {{- else -}} + {{- .context.Values.architecture -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key auth + +Usage: +{{ include "common.mysql.values.key.auth" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MySQL is used as subchart or not. 
Default: false +*/}} +{{- define "common.mysql.values.key.auth" -}} + {{- if .subchart -}} + mysql.auth + {{- else -}} + auth + {{- end -}} +{{- end -}} diff --git a/charts/modules/redis/charts/common/templates/validations/_postgresql.tpl b/charts/modules/redis/charts/common/templates/validations/_postgresql.tpl new file mode 100644 index 0000000..164ec0d --- /dev/null +++ b/charts/modules/redis/charts/common/templates/validations/_postgresql.tpl @@ -0,0 +1,129 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate PostgreSQL required passwords are not empty. + +Usage: +{{ include "common.validations.values.postgresql.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where postgresql values are stored, e.g: "postgresql-passwords-secret" + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.postgresql.passwords" -}} + {{- $existingSecret := include "common.postgresql.values.existingSecret" . -}} + {{- $enabled := include "common.postgresql.values.enabled" . -}} + {{- $valueKeyPostgresqlPassword := include "common.postgresql.values.key.postgressPassword" . -}} + {{- $valueKeyPostgresqlReplicationEnabled := include "common.postgresql.values.key.replicationPassword" . -}} + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + {{- $requiredPostgresqlPassword := dict "valueKey" $valueKeyPostgresqlPassword "secret" .secret "field" "postgresql-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPostgresqlPassword -}} + + {{- $enabledReplication := include "common.postgresql.values.enabled.replication" . -}} + {{- if (eq $enabledReplication "true") -}} + {{- $requiredPostgresqlReplicationPassword := dict "valueKey" $valueKeyPostgresqlReplicationEnabled "secret" .secret "field" "postgresql-replication-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPostgresqlReplicationPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to decide whether evaluate global values. + +Usage: +{{ include "common.postgresql.values.use.global" (dict "key" "key-of-global" "context" $) }} +Params: + - key - String - Required. Field to be evaluated within global, e.g: "existingSecret" +*/}} +{{- define "common.postgresql.values.use.global" -}} + {{- if .context.Values.global -}} + {{- if .context.Values.global.postgresql -}} + {{- index .context.Values.global.postgresql .key | quote -}} + {{- end -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.postgresql.values.existingSecret" (dict "context" $) }} +*/}} +{{- define "common.postgresql.values.existingSecret" -}} + {{- $globalValue := include "common.postgresql.values.use.global" (dict "key" "existingSecret" "context" .context) -}} + + {{- if .subchart -}} + {{- default (.context.Values.postgresql.existingSecret | quote) $globalValue -}} + {{- else -}} + {{- default (.context.Values.existingSecret | quote) $globalValue -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled postgresql. 
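+When PostgreSQL is used as a subchart this returns .Values.postgresql.enabled;
+otherwise it returns the negation of the top-level "enabled" value.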
+ +Usage: +{{ include "common.postgresql.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.postgresql.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.postgresql.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key postgressPassword. + +Usage: +{{ include "common.postgresql.values.key.postgressPassword" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.postgresql.values.key.postgressPassword" -}} + {{- $globalValue := include "common.postgresql.values.use.global" (dict "key" "postgresqlUsername" "context" .context) -}} + + {{- if not $globalValue -}} + {{- if .subchart -}} + postgresql.postgresqlPassword + {{- else -}} + postgresqlPassword + {{- end -}} + {{- else -}} + global.postgresql.postgresqlPassword + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled.replication. + +Usage: +{{ include "common.postgresql.values.enabled.replication" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.postgresql.values.enabled.replication" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.postgresql.replication.enabled -}} + {{- else -}} + {{- printf "%v" .context.Values.replication.enabled -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key replication.password. + +Usage: +{{ include "common.postgresql.values.key.replicationPassword" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.postgresql.values.key.replicationPassword" -}} + {{- if .subchart -}} + postgresql.replication.password + {{- else -}} + replication.password + {{- end -}} +{{- end -}} diff --git a/charts/modules/redis/charts/common/templates/validations/_redis.tpl b/charts/modules/redis/charts/common/templates/validations/_redis.tpl new file mode 100644 index 0000000..dcccfc1 --- /dev/null +++ b/charts/modules/redis/charts/common/templates/validations/_redis.tpl @@ -0,0 +1,76 @@ + +{{/* vim: set filetype=mustache: */}} +{{/* +Validate Redis® required passwords are not empty. + +Usage: +{{ include "common.validations.values.redis.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where redis values are stored, e.g: "redis-passwords-secret" + - subchart - Boolean - Optional. Whether redis is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.redis.passwords" -}} + {{- $enabled := include "common.redis.values.enabled" . -}} + {{- $valueKeyPrefix := include "common.redis.values.keys.prefix" . -}} + {{- $standarizedVersion := include "common.redis.values.standarized.version" . 
}} + + {{- $existingSecret := ternary (printf "%s%s" $valueKeyPrefix "auth.existingSecret") (printf "%s%s" $valueKeyPrefix "existingSecret") (eq $standarizedVersion "true") }} + {{- $existingSecretValue := include "common.utils.getValueFromKey" (dict "key" $existingSecret "context" .context) }} + + {{- $valueKeyRedisPassword := ternary (printf "%s%s" $valueKeyPrefix "auth.password") (printf "%s%s" $valueKeyPrefix "password") (eq $standarizedVersion "true") }} + {{- $valueKeyRedisUseAuth := ternary (printf "%s%s" $valueKeyPrefix "auth.enabled") (printf "%s%s" $valueKeyPrefix "usePassword") (eq $standarizedVersion "true") }} + + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $useAuth := include "common.utils.getValueFromKey" (dict "key" $valueKeyRedisUseAuth "context" .context) -}} + {{- if eq $useAuth "true" -}} + {{- $requiredRedisPassword := dict "valueKey" $valueKeyRedisPassword "secret" .secret "field" "redis-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRedisPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled redis. + +Usage: +{{ include "common.redis.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.redis.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.redis.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right prefix path for the values + +Usage: +{{ include "common.redis.values.key.prefix" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether redis is used as subchart or not. Default: false +*/}} +{{- define "common.redis.values.keys.prefix" -}} + {{- if .subchart -}}redis.{{- else -}}{{- end -}} +{{- end -}} + +{{/* +Checks whether the redis chart's includes the standarizations (version >= 14) + +Usage: +{{ include "common.redis.values.standarized.version" (dict "context" $) }} +*/}} +{{- define "common.redis.values.standarized.version" -}} + + {{- $standarizedAuth := printf "%s%s" (include "common.redis.values.keys.prefix" .) "auth" -}} + {{- $standarizedAuthValues := include "common.utils.getValueFromKey" (dict "key" $standarizedAuth "context" .context) }} + + {{- if $standarizedAuthValues -}} + {{- true -}} + {{- end -}} +{{- end -}} diff --git a/charts/modules/redis/charts/common/templates/validations/_validations.tpl b/charts/modules/redis/charts/common/templates/validations/_validations.tpl new file mode 100644 index 0000000..9a814cf --- /dev/null +++ b/charts/modules/redis/charts/common/templates/validations/_validations.tpl @@ -0,0 +1,46 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate values must not be empty. + +Usage: +{{- $validateValueConf00 := (dict "valueKey" "path.to.value" "secret" "secretName" "field" "password-00") -}} +{{- $validateValueConf01 := (dict "valueKey" "path.to.value" "secret" "secretName" "field" "password-01") -}} +{{ include "common.validations.values.empty" (dict "required" (list $validateValueConf00 $validateValueConf01) "context" $) }} + +Validate value params: + - valueKey - String - Required. The path to the validating value in the values.yaml, e.g: "mysql.password" + - secret - String - Optional. 
Name of the secret where the validating value is generated/stored, e.g: "mysql-passwords-secret" + - field - String - Optional. Name of the field in the secret data, e.g: "mysql-password" +*/}} +{{- define "common.validations.values.multiple.empty" -}} + {{- range .required -}} + {{- include "common.validations.values.single.empty" (dict "valueKey" .valueKey "secret" .secret "field" .field "context" $.context) -}} + {{- end -}} +{{- end -}} + +{{/* +Validate a value must not be empty. + +Usage: +{{ include "common.validations.value.empty" (dict "valueKey" "mariadb.password" "secret" "secretName" "field" "my-password" "subchart" "subchart" "context" $) }} + +Validate value params: + - valueKey - String - Required. The path to the validating value in the values.yaml, e.g: "mysql.password" + - secret - String - Optional. Name of the secret where the validating value is generated/stored, e.g: "mysql-passwords-secret" + - field - String - Optional. Name of the field in the secret data, e.g: "mysql-password" + - subchart - String - Optional - Name of the subchart that the validated password is part of. +*/}} +{{- define "common.validations.values.single.empty" -}} + {{- $value := include "common.utils.getValueFromKey" (dict "key" .valueKey "context" .context) }} + {{- $subchart := ternary "" (printf "%s." .subchart) (empty .subchart) }} + + {{- if not $value -}} + {{- $varname := "my-value" -}} + {{- $getCurrentValue := "" -}} + {{- if and .secret .field -}} + {{- $varname = include "common.utils.fieldToEnvVar" . -}} + {{- $getCurrentValue = printf " To get the current value:\n\n %s\n" (include "common.utils.secret.getvalue" .) -}} + {{- end -}} + {{- printf "\n '%s' must not be empty, please add '--set %s%s=$%s' to the command.%s" .valueKey $subchart .valueKey $varname $getCurrentValue -}} + {{- end -}} +{{- end -}} diff --git a/charts/modules/redis/charts/common/values.yaml b/charts/modules/redis/charts/common/values.yaml new file mode 100644 index 0000000..f2df68e --- /dev/null +++ b/charts/modules/redis/charts/common/values.yaml @@ -0,0 +1,5 @@ +## bitnami/common +## It is required by CI/CD tools and processes. +## @skip exampleValue +## +exampleValue: common-chart diff --git a/charts/modules/redis/img/redis-cluster-topology.png b/charts/modules/redis/img/redis-cluster-topology.png new file mode 100644 index 0000000..f0a02a9 Binary files /dev/null and b/charts/modules/redis/img/redis-cluster-topology.png differ diff --git a/charts/modules/redis/img/redis-topology.png b/charts/modules/redis/img/redis-topology.png new file mode 100644 index 0000000..3f5280f Binary files /dev/null and b/charts/modules/redis/img/redis-topology.png differ diff --git a/charts/modules/redis/templates/NOTES.txt b/charts/modules/redis/templates/NOTES.txt new file mode 100644 index 0000000..2623ade --- /dev/null +++ b/charts/modules/redis/templates/NOTES.txt @@ -0,0 +1,191 @@ +CHART NAME: {{ .Chart.Name }} +CHART VERSION: {{ .Chart.Version }} +APP VERSION: {{ .Chart.AppVersion }} + +** Please be patient while the chart is being deployed ** + +{{- if .Values.diagnosticMode.enabled }} +The chart has been deployed in diagnostic mode. 
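+
+For reference, this mode is driven by the chart's diagnosticMode values and is
+typically enabled with an override such as the following (the release name and
+chart path below are placeholders):
+
+    helm upgrade --install my-release ./charts/modules/redis \
+      --set diagnosticMode.enabled=true
+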
All probes have been disabled and the command has been overwritten with: + + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 4 }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 4 }} + +Get the list of pods by executing: + + kubectl get pods --namespace {{ .Release.Namespace }} -l app.kubernetes.io/instance={{ .Release.Name }} + +Access the pod you want to debug by executing + + kubectl exec --namespace {{ .Release.Namespace }} -ti -- bash + +In order to replicate the container startup scripts execute this command: + +For Redis: + + /opt/bitnami/scripts/redis/entrypoint.sh /opt/bitnami/scripts/redis/run.sh + +{{- if .Values.sentinel.enabled }} + +For Redis Sentinel: + + /opt/bitnami/scripts/redis-sentinel/entrypoint.sh /opt/bitnami/scripts/redis-sentinel/run.sh + +{{- end }} +{{- else }} + +{{- if contains .Values.master.service.type "LoadBalancer" }} +{{- if not .Values.auth.enabled }} +{{ if and (not .Values.networkPolicy.enabled) (.Values.networkPolicy.allowExternal) }} + +------------------------------------------------------------------------------- + WARNING + + By specifying "master.service.type=LoadBalancer" and "auth.enabled=false" you have + most likely exposed the Redis® service externally without any authentication + mechanism. + + For security reasons, we strongly suggest that you switch to "ClusterIP" or + "NodePort". As alternative, you can also switch to "auth.enabled=true" + providing a valid password on "password" parameter. + +------------------------------------------------------------------------------- +{{- end }} +{{- end }} +{{- end }} + +{{- if eq .Values.architecture "replication" }} +{{- if .Values.sentinel.enabled }} + +Redis® can be accessed via port {{ .Values.sentinel.service.ports.redis }} on the following DNS name from within your cluster: + + {{ template "common.names.fullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} for read only operations + +For read/write operations, first access the Redis® Sentinel cluster, which is available in port {{ .Values.sentinel.service.ports.sentinel }} using the same domain name above. + +{{- else }} + +Redis® can be accessed on the following DNS names from within your cluster: + + {{ printf "%s-master.%s.svc.%s" (include "common.names.fullname" .) .Release.Namespace .Values.clusterDomain }} for read/write operations (port {{ .Values.master.service.ports.redis }}) + {{ printf "%s-replicas.%s.svc.%s" (include "common.names.fullname" .) .Release.Namespace .Values.clusterDomain }} for read-only operations (port {{ .Values.replica.service.ports.redis }}) + +{{- end }} +{{- else }} + +Redis® can be accessed via port {{ .Values.master.service.ports.redis }} on the following DNS name from within your cluster: + + {{ template "common.names.fullname" . }}-master.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} + +{{- end }} + +{{ if .Values.auth.enabled }} + +To get your password run: + + export REDIS_PASSWORD=$(kubectl get secret --namespace {{ .Release.Namespace }} {{ template "redis.secretName" . }} -o jsonpath="{.data.redis-password}" | base64 -d) + +{{- end }} + +To connect to your Redis® server: + +1. Run a Redis® pod that you can use as a client: + + kubectl run --namespace {{ .Release.Namespace }} redis-client --restart='Never' {{ if .Values.auth.enabled }} --env REDIS_PASSWORD=$REDIS_PASSWORD {{ end }} --image {{ template "redis.image" . 
}} --command -- sleep infinity + +{{- if .Values.tls.enabled }} + + Copy your TLS certificates to the pod: + + kubectl cp --namespace {{ .Release.Namespace }} /path/to/client.cert redis-client:/tmp/client.cert + kubectl cp --namespace {{ .Release.Namespace }} /path/to/client.key redis-client:/tmp/client.key + kubectl cp --namespace {{ .Release.Namespace }} /path/to/CA.cert redis-client:/tmp/CA.cert + +{{- end }} + + Use the following command to attach to the pod: + + kubectl exec --tty -i redis-client \ + {{- if and (.Values.networkPolicy.enabled) (not .Values.networkPolicy.allowExternal) }}--labels="{{ template "common.names.fullname" . }}-client=true" \{{- end }} + --namespace {{ .Release.Namespace }} -- bash + +2. Connect using the Redis® CLI: + +{{- if eq .Values.architecture "replication" }} + {{- if .Values.sentinel.enabled }} + {{ if .Values.auth.enabled }}REDISCLI_AUTH="$REDIS_PASSWORD" {{ end }}redis-cli -h {{ template "common.names.fullname" . }} -p {{ .Values.sentinel.service.ports.redis }}{{ if .Values.tls.enabled }} --tls --cert /tmp/client.cert --key /tmp/client.key --cacert /tmp/CA.cert{{ end }} # Read only operations + {{ if .Values.auth.enabled }}REDISCLI_AUTH="$REDIS_PASSWORD" {{ end }}redis-cli -h {{ template "common.names.fullname" . }} -p {{ .Values.sentinel.service.ports.sentinel }}{{ if .Values.tls.enabled }} --tls --cert /tmp/client.cert --key /tmp/client.key --cacert /tmp/CA.cert{{ end }} # Sentinel access + {{- else }} + {{ if .Values.auth.enabled }}REDISCLI_AUTH="$REDIS_PASSWORD" {{ end }}redis-cli -h {{ printf "%s-master" (include "common.names.fullname" .) }}{{ if .Values.tls.enabled }} --tls --cert /tmp/client.cert --key /tmp/client.key --cacert /tmp/CA.cert{{ end }} + {{ if .Values.auth.enabled }}REDISCLI_AUTH="$REDIS_PASSWORD" {{ end }}redis-cli -h {{ printf "%s-replicas" (include "common.names.fullname" .) }}{{ if .Values.tls.enabled }} --tls --cert /tmp/client.cert --key /tmp/client.key --cacert /tmp/CA.cert{{ end }} + {{- end }} +{{- else }} + {{ if .Values.auth.enabled }}REDISCLI_AUTH="$REDIS_PASSWORD" {{ end }}redis-cli -h {{ template "common.names.fullname" . }}-master{{ if .Values.tls.enabled }} --tls --cert /tmp/client.cert --key /tmp/client.key --cacert /tmp/CA.cert{{ end }} +{{- end }} + +{{- if and (.Values.networkPolicy.enabled) (not .Values.networkPolicy.allowExternal) }} + +Note: Since NetworkPolicy is enabled, only pods with label {{ template "common.names.fullname" . }}-client=true" will be able to connect to redis. + +{{- else }} + +To connect to your database from outside the cluster execute the following commands: + +{{- if and (eq .Values.architecture "replication") .Values.sentinel.enabled }} +{{- if contains "NodePort" .Values.sentinel.service.type }} + + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "common.names.fullname" . }}) + {{ if .Values.auth.enabled }}REDISCLI_AUTH="$REDIS_PASSWORD" {{ end }}redis-cli -h $NODE_IP -p $NODE_PORT {{- if .Values.tls.enabled }} --tls --cert /tmp/client.cert --key /tmp/client.key --cacert /tmp/CA.cert{{ end }} + +{{- else if contains "LoadBalancer" .Values.sentinel.service.type }} + + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + Watch the status with: 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ template "common.names.fullname" . 
}}' + + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "common.names.fullname" . }} --template "{{ "{{ range (index .status.loadBalancer.ingress 0) }}{{ . }}{{ end }}" }}") + {{ if .Values.auth.enabled }}REDISCLI_AUTH="$REDIS_PASSWORD" {{ end }}redis-cli -h $SERVICE_IP -p {{ .Values.sentinel.service.ports.redis }} {{- if .Values.tls.enabled }} --tls --cert /tmp/client.cert --key /tmp/client.key --cacert /tmp/CA.cert{{ end }} + +{{- else if contains "ClusterIP" .Values.sentinel.service.type }} + + kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ template "common.names.fullname" . }} {{ .Values.sentinel.service.ports.redis }}:{{ .Values.sentinel.service.ports.redis }} & + {{ if .Values.auth.enabled }}REDISCLI_AUTH="$REDIS_PASSWORD" {{ end }}redis-cli -h 127.0.0.1 -p {{ .Values.sentinel.service.ports.redis }} {{- if .Values.tls.enabled }} --tls --cert /tmp/client.cert --key /tmp/client.key --cacert /tmp/CA.cert{{ end }} + +{{- end }} +{{- else }} +{{- if contains "NodePort" .Values.master.service.type }} + + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ printf "%s-master" (include "common.names.fullname" .) }}) + {{ if .Values.auth.enabled }}REDISCLI_AUTH="$REDIS_PASSWORD" {{ end }}redis-cli -h $NODE_IP -p $NODE_PORT {{- if .Values.tls.enabled }} --tls --cert /tmp/client.cert --key /tmp/client.key --cacert /tmp/CA.cert{{ end }} + +{{- else if contains "LoadBalancer" .Values.master.service.type }} + + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + Watch the status with: 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ template "common.names.fullname" . }}' + + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ printf "%s-master" (include "common.names.fullname" .) }} --template "{{ "{{ range (index .status.loadBalancer.ingress 0) }}{{ . }}{{ end }}" }}") + {{ if .Values.auth.enabled }}REDISCLI_AUTH="$REDIS_PASSWORD" {{ end }}redis-cli -h $SERVICE_IP -p {{ .Values.master.service.ports.redis }} {{- if .Values.tls.enabled }} --tls --cert /tmp/client.cert --key /tmp/client.key --cacert /tmp/CA.cert{{ end }} + +{{- else if contains "ClusterIP" .Values.master.service.type }} + + kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ printf "%s-master" (include "common.names.fullname" .) }} {{ .Values.master.service.ports.redis }}:{{ .Values.master.service.ports.redis }} & + {{ if .Values.auth.enabled }}REDISCLI_AUTH="$REDIS_PASSWORD" {{ end }}redis-cli -h 127.0.0.1 -p {{ .Values.master.service.ports.redis }} {{- if .Values.tls.enabled }} --tls --cert /tmp/client.cert --key /tmp/client.key --cacert /tmp/CA.cert{{ end }} + +{{- end }} +{{- end }} + +{{- end }} +{{- end }} +{{- include "redis.checkRollingTags" . }} +{{- include "common.warnings.rollingTag" .Values.volumePermissions.image }} +{{- include "common.warnings.rollingTag" .Values.sysctl.image }} +{{- include "redis.validateValues" . 
}} + +{{- if and (eq .Values.architecture "replication") .Values.sentinel.enabled (eq .Values.sentinel.service.type "NodePort") (not .Release.IsUpgrade ) }} +{{- if $.Values.sentinel.service.nodePorts.sentinel }} +No need to upgrade, ports and nodeports have been set from values +{{- else }} +#!#!#!#!#!#!#!# IMPORTANT #!#!#!#!#!#!#!# +YOU NEED TO PERFORM AN UPGRADE FOR THE SERVICES AND WORKLOAD TO BE CREATED +{{- end }} +{{- end }} diff --git a/charts/modules/redis/templates/_helpers.tpl b/charts/modules/redis/templates/_helpers.tpl new file mode 100644 index 0000000..90064e8 --- /dev/null +++ b/charts/modules/redis/templates/_helpers.tpl @@ -0,0 +1,321 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Return the proper Redis image name +*/}} +{{- define "redis.image" -}} +{{ include "common.images.image" (dict "imageRoot" .Values.image "global" .Values.global) }} +{{- end -}} + +{{/* +Return the proper Redis Sentinel image name +*/}} +{{- define "redis.sentinel.image" -}} +{{ include "common.images.image" (dict "imageRoot" .Values.sentinel.image "global" .Values.global) }} +{{- end -}} + +{{/* +Return the proper image name (for the metrics image) +*/}} +{{- define "redis.metrics.image" -}} +{{ include "common.images.image" (dict "imageRoot" .Values.metrics.image "global" .Values.global) }} +{{- end -}} + +{{/* +Return the proper image name (for the init container volume-permissions image) +*/}} +{{- define "redis.volumePermissions.image" -}} +{{ include "common.images.image" (dict "imageRoot" .Values.volumePermissions.image "global" .Values.global) }} +{{- end -}} + +{{/* +Return sysctl image +*/}} +{{- define "redis.sysctl.image" -}} +{{ include "common.images.image" (dict "imageRoot" .Values.sysctl.image "global" .Values.global) }} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names +*/}} +{{- define "redis.imagePullSecrets" -}} +{{- include "common.images.pullSecrets" (dict "images" (list .Values.image .Values.sentinel.image .Values.metrics.image .Values.volumePermissions.image .Values.sysctl.image) "global" .Values.global) -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for networkpolicy. +*/}} +{{- define "networkPolicy.apiVersion" -}} +{{- if semverCompare ">=1.4-0, <1.7-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiGroup for PodSecurityPolicy. +*/}} +{{- define "podSecurityPolicy.apiGroup" -}} +{{- if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "policy" -}} +{{- else -}} +{{- print "extensions" -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if a TLS secret object should be created +*/}} +{{- define "redis.createTlsSecret" -}} +{{- if and .Values.tls.enabled .Values.tls.autoGenerated (and (not .Values.tls.existingSecret) (not .Values.tls.certificatesSecret)) }} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Return the secret containing Redis TLS certificates +*/}} +{{- define "redis.tlsSecretName" -}} +{{- $secretName := coalesce .Values.tls.existingSecret .Values.tls.certificatesSecret -}} +{{- if $secretName -}} + {{- printf "%s" (tpl $secretName $) -}} +{{- else -}} + {{- printf "%s-crt" (include "common.names.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Return the path to the cert file. +*/}} +{{- define "redis.tlsCert" -}} +{{- if (include "redis.createTlsSecret" . 
) -}} + {{- printf "/opt/bitnami/redis/certs/%s" "tls.crt" -}} +{{- else -}} + {{- required "Certificate filename is required when TLS in enabled" .Values.tls.certFilename | printf "/opt/bitnami/redis/certs/%s" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the path to the cert key file. +*/}} +{{- define "redis.tlsCertKey" -}} +{{- if (include "redis.createTlsSecret" . ) -}} + {{- printf "/opt/bitnami/redis/certs/%s" "tls.key" -}} +{{- else -}} + {{- required "Certificate Key filename is required when TLS in enabled" .Values.tls.certKeyFilename | printf "/opt/bitnami/redis/certs/%s" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the path to the CA cert file. +*/}} +{{- define "redis.tlsCACert" -}} +{{- if (include "redis.createTlsSecret" . ) -}} + {{- printf "/opt/bitnami/redis/certs/%s" "ca.crt" -}} +{{- else -}} + {{- required "Certificate CA filename is required when TLS in enabled" .Values.tls.certCAFilename | printf "/opt/bitnami/redis/certs/%s" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the path to the DH params file. +*/}} +{{- define "redis.tlsDHParams" -}} +{{- if .Values.tls.dhParamsFilename -}} +{{- printf "/opt/bitnami/redis/certs/%s" .Values.tls.dhParamsFilename -}} +{{- end -}} +{{- end -}} + +{{/* +Create the name of the shared service account to use +*/}} +{{- define "redis.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (include "common.names.fullname" .) .Values.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Create the name of the master service account to use +*/}} +{{- define "redis.masterServiceAccountName" -}} +{{- if .Values.master.serviceAccount.create -}} + {{ default (printf "%s-master" (include "common.names.fullname" .)) .Values.master.serviceAccount.name }} +{{- else -}} + {{- if .Values.serviceAccount.create -}} + {{ template "redis.serviceAccountName" . }} + {{- else -}} + {{ default "default" .Values.master.serviceAccount.name }} + {{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create the name of the replicas service account to use +*/}} +{{- define "redis.replicaServiceAccountName" -}} +{{- if .Values.replica.serviceAccount.create -}} + {{ default (printf "%s-replica" (include "common.names.fullname" .)) .Values.replica.serviceAccount.name }} +{{- else -}} + {{- if .Values.serviceAccount.create -}} + {{ template "redis.serviceAccountName" . }} + {{- else -}} + {{ default "default" .Values.replica.serviceAccount.name }} + {{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Return the configuration configmap name +*/}} +{{- define "redis.configmapName" -}} +{{- if .Values.existingConfigmap -}} + {{- printf "%s" (tpl .Values.existingConfigmap $) -}} +{{- else -}} + {{- printf "%s-configuration" (include "common.names.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if a configmap object should be created +*/}} +{{- define "redis.createConfigmap" -}} +{{- if empty .Values.existingConfigmap }} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Get the password secret. +*/}} +{{- define "redis.secretName" -}} +{{- if .Values.auth.existingSecret -}} +{{- printf "%s" (tpl .Values.auth.existingSecret $) -}} +{{- else -}} +{{- printf "%s" (include "common.names.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Get the password key to be retrieved from Redis® secret. 
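+
+For reference, the secret name rendered by "redis.secretName" together with this key
+(by default "redis-password", per the definition below) is read back elsewhere in the
+chart, e.g. in NOTES.txt, along these lines; the namespace and secret name here are
+placeholders:
+
+  kubectl get secret --namespace my-namespace my-redis \
+    -o jsonpath="{.data.redis-password}" | base64 -d
+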
+*/}} +{{- define "redis.secretPasswordKey" -}} +{{- if and .Values.auth.existingSecret .Values.auth.existingSecretPasswordKey -}} +{{- printf "%s" .Values.auth.existingSecretPasswordKey -}} +{{- else -}} +{{- printf "redis-password" -}} +{{- end -}} +{{- end -}} + + +{{/* +Returns the available value for certain key in an existing secret (if it exists), +otherwise it generates a random value. +*/}} +{{- define "getValueFromSecret" }} + {{- $len := (default 16 .Length) | int -}} + {{- $obj := (lookup "v1" "Secret" .Namespace .Name).data -}} + {{- if $obj }} + {{- index $obj .Key | b64dec -}} + {{- else -}} + {{- randAlphaNum $len -}} + {{- end -}} +{{- end }} + +{{/* +Return Redis® password +*/}} +{{- define "redis.password" -}} +{{- if not (empty .Values.global.redis.password) }} + {{- .Values.global.redis.password -}} +{{- else if not (empty .Values.auth.password) -}} + {{- .Values.auth.password -}} +{{- else -}} + {{- include "getValueFromSecret" (dict "Namespace" .Release.Namespace "Name" (include "common.names.fullname" .) "Length" 10 "Key" "redis-password") -}} +{{- end -}} +{{- end -}} + +{{/* Check if there are rolling tags in the images */}} +{{- define "redis.checkRollingTags" -}} +{{- include "common.warnings.rollingTag" .Values.image }} +{{- include "common.warnings.rollingTag" .Values.sentinel.image }} +{{- include "common.warnings.rollingTag" .Values.metrics.image }} +{{- end -}} + +{{/* +Compile all warnings into a single message, and call fail. +*/}} +{{- define "redis.validateValues" -}} +{{- $messages := list -}} +{{- $messages := append $messages (include "redis.validateValues.topologySpreadConstraints" .) -}} +{{- $messages := append $messages (include "redis.validateValues.architecture" .) -}} +{{- $messages := append $messages (include "redis.validateValues.podSecurityPolicy.create" .) -}} +{{- $messages := append $messages (include "redis.validateValues.tls" .) -}} +{{- $messages := without $messages "" -}} +{{- $message := join "\n" $messages -}} + +{{- if $message -}} +{{- printf "\nVALUES VALIDATION:\n%s" $message | fail -}} +{{- end -}} +{{- end -}} + +{{/* Validate values of Redis® - spreadConstrainsts K8s version */}} +{{- define "redis.validateValues.topologySpreadConstraints" -}} +{{- if and (semverCompare "<1.16-0" .Capabilities.KubeVersion.GitVersion) .Values.replica.topologySpreadConstraints -}} +redis: topologySpreadConstraints + Pod Topology Spread Constraints are only available on K8s >= 1.16 + Find more information at https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ +{{- end -}} +{{- end -}} + +{{/* Validate values of Redis® - must provide a valid architecture */}} +{{- define "redis.validateValues.architecture" -}} +{{- if and (ne .Values.architecture "standalone") (ne .Values.architecture "replication") -}} +redis: architecture + Invalid architecture selected. Valid values are "standalone" and + "replication". Please set a valid architecture (--set architecture="xxxx") +{{- end -}} +{{- if and .Values.sentinel.enabled (not (eq .Values.architecture "replication")) }} +redis: architecture + Using redis sentinel on standalone mode is not supported. 
+ To deploy redis sentinel, please select the "replication" mode + (--set "architecture=replication,sentinel.enabled=true") +{{- end -}} +{{- end -}} + +{{/* Validate values of Redis® - PodSecurityPolicy create */}} +{{- define "redis.validateValues.podSecurityPolicy.create" -}} +{{- if and .Values.podSecurityPolicy.create (not .Values.podSecurityPolicy.enabled) }} +redis: podSecurityPolicy.create + In order to create PodSecurityPolicy, you also need to enable + podSecurityPolicy.enabled field +{{- end -}} +{{- end -}} + +{{/* Validate values of Redis® - TLS enabled */}} +{{- define "redis.validateValues.tls" -}} +{{- if and .Values.tls.enabled (not .Values.tls.autoGenerated) (not .Values.tls.existingSecret) (not .Values.tls.certificatesSecret) }} +redis: tls.enabled + In order to enable TLS, you also need to provide + an existing secret containing the TLS certificates or + enable auto-generated certificates. +{{- end -}} +{{- end -}} + +{{/* Define the suffix utilized for external-dns */}} +{{- define "redis.externalDNS.suffix" -}} +{{ printf "%s.%s" (include "common.names.fullname" .) .Values.useExternalDNS.suffix }} +{{- end -}} + +{{/* Compile all annotations utilized for external-dns */}} +{{- define "redis.externalDNS.annotations" -}} +{{- if and .Values.useExternalDNS.enabled .Values.useExternalDNS.annotationKey }} +{{ .Values.useExternalDNS.annotationKey }}hostname: {{ include "redis.externalDNS.suffix" . }} +{{- range $key, $val := .Values.useExternalDNS.additionalAnnotations }} +{{ $.Values.useExternalDNS.annotationKey }}{{ $key }}: {{ $val | quote }} +{{- end }} +{{- end }} +{{- end }} diff --git a/charts/modules/redis/templates/configmap.yaml b/charts/modules/redis/templates/configmap.yaml new file mode 100644 index 0000000..9e70a38 --- /dev/null +++ b/charts/modules/redis/templates/configmap.yaml @@ -0,0 +1,59 @@ +{{- if (include "redis.createConfigmap" .) }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ printf "%s-configuration" (include "common.names.fullname" .) }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: + redis.conf: |- + # User-supplied common configuration: + {{- if .Values.commonConfiguration }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonConfiguration "context" $ ) | nindent 4 }} + {{- end }} + # End of common configuration + master.conf: |- + dir {{ .Values.master.persistence.path }} + # User-supplied master configuration: + {{- if .Values.master.configuration }} + {{- include "common.tplvalues.render" ( dict "value" .Values.master.configuration "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.master.disableCommands }} + {{- range .Values.master.disableCommands }} + rename-command {{ . }} "" + {{- end }} + {{- end }} + # End of master configuration + replica.conf: |- + dir {{ .Values.replica.persistence.path }} + # User-supplied replica configuration: + {{- if .Values.replica.configuration }} + {{- include "common.tplvalues.render" ( dict "value" .Values.replica.configuration "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.replica.disableCommands }} + {{- range .Values.replica.disableCommands }} + rename-command {{ . 
}} "" + {{- end }} + {{- end }} + # End of replica configuration + {{- if .Values.sentinel.enabled }} + sentinel.conf: |- + dir "/tmp" + port {{ .Values.sentinel.containerPorts.sentinel }} + sentinel monitor {{ .Values.sentinel.masterSet }} {{ template "common.names.fullname" . }}-node-0.{{ template "common.names.fullname" . }}-headless.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} {{ .Values.sentinel.service.ports.redis }} {{ .Values.sentinel.quorum }} + sentinel down-after-milliseconds {{ .Values.sentinel.masterSet }} {{ .Values.sentinel.downAfterMilliseconds }} + sentinel failover-timeout {{ .Values.sentinel.masterSet }} {{ .Values.sentinel.failoverTimeout }} + sentinel parallel-syncs {{ .Values.sentinel.masterSet }} {{ .Values.sentinel.parallelSyncs }} + # User-supplied sentinel configuration: + {{- if .Values.sentinel.configuration }} + {{- include "common.tplvalues.render" ( dict "value" .Values.sentinel.configuration "context" $ ) | nindent 4 }} + {{- end }} + # End of sentinel configuration + {{- end }} +{{- end }} diff --git a/charts/modules/redis/templates/extra-list.yaml b/charts/modules/redis/templates/extra-list.yaml new file mode 100644 index 0000000..9ac65f9 --- /dev/null +++ b/charts/modules/redis/templates/extra-list.yaml @@ -0,0 +1,4 @@ +{{- range .Values.extraDeploy }} +--- +{{ include "common.tplvalues.render" (dict "value" . "context" $) }} +{{- end }} diff --git a/charts/modules/redis/templates/headless-svc.yaml b/charts/modules/redis/templates/headless-svc.yaml new file mode 100644 index 0000000..d798a0b --- /dev/null +++ b/charts/modules/redis/templates/headless-svc.yaml @@ -0,0 +1,30 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ printf "%s-headless" (include "common.names.fullname" .) }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + annotations: + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- include "redis.externalDNS.annotations" . | nindent 4 }} +spec: + type: ClusterIP + clusterIP: None + {{- if .Values.sentinel.enabled }} + publishNotReadyAddresses: true + {{- end }} + ports: + - name: tcp-redis + port: {{ if .Values.sentinel.enabled }}{{ .Values.sentinel.service.ports.redis }}{{ else }}{{ .Values.master.service.ports.redis }}{{ end }} + targetPort: redis + {{- if .Values.sentinel.enabled }} + - name: tcp-sentinel + port: {{ .Values.sentinel.service.ports.sentinel }} + targetPort: redis-sentinel + {{- end }} + selector: {{- include "common.labels.matchLabels" . | nindent 4 }} diff --git a/charts/modules/redis/templates/health-configmap.yaml b/charts/modules/redis/templates/health-configmap.yaml new file mode 100644 index 0000000..41f3145 --- /dev/null +++ b/charts/modules/redis/templates/health-configmap.yaml @@ -0,0 +1,192 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ printf "%s-health" (include "common.names.fullname" .) }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: + ping_readiness_local.sh: |- + #!/bin/bash + + [[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")" + [[ -n "$REDIS_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_PASSWORD" + response=$( + timeout -s 3 $1 \ + redis-cli \ + -h localhost \ +{{- if .Values.tls.enabled }} + -p $REDIS_TLS_PORT \ + --tls \ + --cacert {{ template "redis.tlsCACert" . }} \ + {{- if .Values.tls.authClients }} + --cert {{ template "redis.tlsCert" . }} \ + --key {{ template "redis.tlsCertKey" . }} \ + {{- end }} +{{- else }} + -p $REDIS_PORT \ +{{- end }} + ping + ) + if [ "$?" -eq "124" ]; then + echo "Timed out" + exit 1 + fi + if [ "$response" != "PONG" ]; then + echo "$response" + exit 1 + fi + ping_liveness_local.sh: |- + #!/bin/bash + + [[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")" + [[ -n "$REDIS_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_PASSWORD" + response=$( + timeout -s 3 $1 \ + redis-cli \ + -h localhost \ +{{- if .Values.tls.enabled }} + -p $REDIS_TLS_PORT \ + --tls \ + --cacert {{ template "redis.tlsCACert" . }} \ + {{- if .Values.tls.authClients }} + --cert {{ template "redis.tlsCert" . }} \ + --key {{ template "redis.tlsCertKey" . }} \ + {{- end }} +{{- else }} + -p $REDIS_PORT \ +{{- end }} + ping + ) + if [ "$?" -eq "124" ]; then + echo "Timed out" + exit 1 + fi + responseFirstWord=$(echo $response | head -n1 | awk '{print $1;}') + if [ "$response" != "PONG" ] && [ "$responseFirstWord" != "LOADING" ] && [ "$responseFirstWord" != "MASTERDOWN" ]; then + echo "$response" + exit 1 + fi +{{- if .Values.sentinel.enabled }} + ping_sentinel.sh: |- + #!/bin/bash + +{{- if .Values.auth.sentinel }} + [[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")" + [[ -n "$REDIS_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_PASSWORD" +{{- end }} + response=$( + timeout -s 3 $1 \ + redis-cli \ + -h localhost \ +{{- if .Values.tls.enabled }} + -p $REDIS_SENTINEL_TLS_PORT_NUMBER \ + --tls \ + --cacert "$REDIS_SENTINEL_TLS_CA_FILE" \ + {{- if .Values.tls.authClients }} + --cert "$REDIS_SENTINEL_TLS_CERT_FILE" \ + --key "$REDIS_SENTINEL_TLS_KEY_FILE" \ + {{- end }} +{{- else }} + -p $REDIS_SENTINEL_PORT \ +{{- end }} + ping + ) + if [ "$?" -eq "124" ]; then + echo "Timed out" + exit 1 + fi + if [ "$response" != "PONG" ]; then + echo "$response" + exit 1 + fi + parse_sentinels.awk: |- + /ip/ {FOUND_IP=1} + /port/ {FOUND_PORT=1} + /runid/ {FOUND_RUNID=1} + !/ip|port|runid/ { + if (FOUND_IP==1) { + IP=$1; FOUND_IP=0; + } + else if (FOUND_PORT==1) { + PORT=$1; + FOUND_PORT=0; + } else if (FOUND_RUNID==1) { + printf "\nsentinel known-sentinel {{ .Values.sentinel.masterSet }} %s %s %s", IP, PORT, $0; FOUND_RUNID=0; + } + } +{{- end }} + ping_readiness_master.sh: |- + #!/bin/bash + + [[ -f $REDIS_MASTER_PASSWORD_FILE ]] && export REDIS_MASTER_PASSWORD="$(< "${REDIS_MASTER_PASSWORD_FILE}")" + [[ -n "$REDIS_MASTER_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_MASTER_PASSWORD" + response=$( + timeout -s 3 $1 \ + redis-cli \ + -h $REDIS_MASTER_HOST \ + -p $REDIS_MASTER_PORT_NUMBER \ +{{- if .Values.tls.enabled }} + --tls \ + --cacert {{ template "redis.tlsCACert" . 
}} \ + {{- if .Values.tls.authClients }} + --cert {{ template "redis.tlsCert" . }} \ + --key {{ template "redis.tlsCertKey" . }} \ + {{- end }} +{{- end }} + ping + ) + if [ "$?" -eq "124" ]; then + echo "Timed out" + exit 1 + fi + if [ "$response" != "PONG" ]; then + echo "$response" + exit 1 + fi + ping_liveness_master.sh: |- + #!/bin/bash + + [[ -f $REDIS_MASTER_PASSWORD_FILE ]] && export REDIS_MASTER_PASSWORD="$(< "${REDIS_MASTER_PASSWORD_FILE}")" + [[ -n "$REDIS_MASTER_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_MASTER_PASSWORD" + response=$( + timeout -s 3 $1 \ + redis-cli \ + -h $REDIS_MASTER_HOST \ + -p $REDIS_MASTER_PORT_NUMBER \ +{{- if .Values.tls.enabled }} + --tls \ + --cacert {{ template "redis.tlsCACert" . }} \ + {{- if .Values.tls.authClients }} + --cert {{ template "redis.tlsCert" . }} \ + --key {{ template "redis.tlsCertKey" . }} \ + {{- end }} +{{- end }} + ping + ) + if [ "$?" -eq "124" ]; then + echo "Timed out" + exit 1 + fi + responseFirstWord=$(echo $response | head -n1 | awk '{print $1;}') + if [ "$response" != "PONG" ] && [ "$responseFirstWord" != "LOADING" ]; then + echo "$response" + exit 1 + fi + ping_readiness_local_and_master.sh: |- + script_dir="$(dirname "$0")" + exit_status=0 + "$script_dir/ping_readiness_local.sh" $1 || exit_status=$? + "$script_dir/ping_readiness_master.sh" $1 || exit_status=$? + exit $exit_status + ping_liveness_local_and_master.sh: |- + script_dir="$(dirname "$0")" + exit_status=0 + "$script_dir/ping_liveness_local.sh" $1 || exit_status=$? + "$script_dir/ping_liveness_master.sh" $1 || exit_status=$? + exit $exit_status diff --git a/charts/modules/redis/templates/master/application.yaml b/charts/modules/redis/templates/master/application.yaml new file mode 100644 index 0000000..17cdb8a --- /dev/null +++ b/charts/modules/redis/templates/master/application.yaml @@ -0,0 +1,481 @@ +{{- if or (not (eq .Values.architecture "replication")) (not .Values.sentinel.enabled) }} +apiVersion: {{ include "common.capabilities.statefulset.apiVersion" . }} +kind: {{ .Values.master.kind }} +metadata: + name: {{ printf "%s-master" (include "common.names.fullname" .) }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: master + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + replicas: {{ .Values.master.count }} + selector: + matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }} + app.kubernetes.io/component: master + {{- if (eq .Values.master.kind "StatefulSet") }} + serviceName: {{ printf "%s-headless" (include "common.names.fullname" .) }} + {{- end }} + {{- if .Values.master.updateStrategy }} + {{- if (eq .Values.master.kind "Deployment") }} + strategy: {{- toYaml .Values.master.updateStrategy | nindent 4 }} + {{- else }} + updateStrategy: {{- toYaml .Values.master.updateStrategy | nindent 4 }} + {{- end }} + {{- end }} + template: + metadata: + labels: {{- include "common.labels.standard" . 
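
master/application.yaml above renders either a StatefulSet or a Deployment depending on .Values.master.kind, and the guard at the top means the manifest is skipped only when both the replication architecture and Sentinel are enabled. A values sketch for the top-level keys it consumes:

master:
  kind: StatefulSet        # "Deployment" switches strategy vs. updateStrategy and the PVC handling further down
  count: 1                 # becomes .spec.replicas of the master workload
  updateStrategy:
    type: RollingUpdate
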
| nindent 8 }} + app.kubernetes.io/component: master + {{- if .Values.master.podLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.master.podLabels "context" $ ) | nindent 8 }} + {{- end }} + {{- if and .Values.metrics.enabled .Values.metrics.podLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.podLabels "context" $ ) | nindent 8 }} + {{- end }} + annotations: + {{- if (include "redis.createConfigmap" .) }} + checksum/configmap: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} + {{- end }} + checksum/health: {{ include (print $.Template.BasePath "/health-configmap.yaml") . | sha256sum }} + checksum/scripts: {{ include (print $.Template.BasePath "/scripts-configmap.yaml") . | sha256sum }} + checksum/secret: {{ include (print $.Template.BasePath "/secret.yaml") . | sha256sum }} + {{- if .Values.master.podAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.master.podAnnotations "context" $ ) | nindent 8 }} + {{- end }} + {{- if and .Values.metrics.enabled .Values.metrics.podAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.podAnnotations "context" $ ) | nindent 8 }} + {{- end }} + spec: + {{- include "redis.imagePullSecrets" . | nindent 6 }} + {{- if .Values.master.hostAliases }} + hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.master.hostAliases "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.master.podSecurityContext.enabled }} + securityContext: {{- omit .Values.master.podSecurityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + serviceAccountName: {{ template "redis.masterServiceAccountName" . }} + {{- if .Values.master.priorityClassName }} + priorityClassName: {{ .Values.master.priorityClassName | quote }} + {{- end }} + {{- if .Values.master.affinity }} + affinity: {{- include "common.tplvalues.render" (dict "value" .Values.master.affinity "context" $) | nindent 8 }} + {{- else }} + affinity: + podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.master.podAffinityPreset "component" "master" "context" $) | nindent 10 }} + podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.master.podAntiAffinityPreset "component" "master" "context" $) | nindent 10 }} + nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.master.nodeAffinityPreset.type "key" .Values.master.nodeAffinityPreset.key "values" .Values.master.nodeAffinityPreset.values) | nindent 10 }} + {{- end }} + {{- if .Values.master.nodeSelector }} + nodeSelector: {{- include "common.tplvalues.render" (dict "value" .Values.master.nodeSelector "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.master.tolerations }} + tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.master.tolerations "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.master.topologySpreadConstraints }} + topologySpreadConstraints: {{- include "common.tplvalues.render" (dict "value" .Values.master.topologySpreadConstraints "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.master.shareProcessNamespace }} + shareProcessNamespace: {{ .Values.master.shareProcessNamespace }} + {{- end }} + {{- if .Values.master.schedulerName }} + schedulerName: {{ .Values.master.schedulerName | quote }} + {{- end }} + {{- if .Values.master.dnsPolicy }} + dnsPolicy: {{ .Values.master.dnsPolicy }} + {{- end }} + {{- if .Values.master.dnsConfig }} + dnsConfig: {{- include "common.tplvalues.render" (dict "value" 
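
The scheduling block above only falls back to the common.affinities presets when .Values.master.affinity is empty. A hedged sketch that spreads masters across zones and pins them to a node pool; the label keys and the "hard"/"soft" preset names follow the usual Bitnami common-library conventions and are examples rather than chart requirements:

master:
  podAntiAffinityPreset: hard                     # ignored if master.affinity is set explicitly
  nodeAffinityPreset:
    type: soft
    key: node.kubernetes.io/instance-type         # example label key
    values:
      - m5.large
  nodeSelector:
    workload: redis                               # example node label
  tolerations:
    - key: dedicated
      operator: Equal
      value: redis
      effect: NoSchedule
  topologySpreadConstraints:
    - maxSkew: 1
      topologyKey: topology.kubernetes.io/zone
      whenUnsatisfiable: ScheduleAnyway
      labelSelector:
        matchLabels:
          app.kubernetes.io/component: master
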
.Values.master.dnsConfig "context" $) | nindent 8 }} + {{- end }} + terminationGracePeriodSeconds: {{ .Values.master.terminationGracePeriodSeconds }} + containers: + - name: redis + image: {{ template "redis.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if .Values.master.lifecycleHooks }} + lifecycle: {{- include "common.tplvalues.render" (dict "value" .Values.master.lifecycleHooks "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.master.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.master.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + {{- else if .Values.master.command }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.master.command "context" $) | nindent 12 }} + {{- else }} + command: + - /bin/bash + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- else if .Values.master.args }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.master.args "context" $) | nindent 12 }} + {{- else }} + args: + - -c + - /opt/bitnami/scripts/start-scripts/start-master.sh + {{- end }} + env: + - name: BITNAMI_DEBUG + value: {{ ternary "true" "false" (or .Values.image.debug .Values.diagnosticMode.enabled) | quote }} + - name: REDIS_REPLICATION_MODE + value: master + - name: ALLOW_EMPTY_PASSWORD + value: {{ ternary "no" "yes" .Values.auth.enabled | quote }} + {{- if .Values.auth.enabled }} + {{- if .Values.auth.usePasswordFiles }} + - name: REDIS_PASSWORD_FILE + value: "/opt/bitnami/redis/secrets/redis-password" + {{- else }} + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . }} + {{- end }} + {{- end }} + - name: REDIS_TLS_ENABLED + value: {{ ternary "yes" "no" .Values.tls.enabled | quote }} + {{- if .Values.tls.enabled }} + - name: REDIS_TLS_PORT + value: {{ .Values.master.containerPorts.redis | quote }} + - name: REDIS_TLS_AUTH_CLIENTS + value: {{ ternary "yes" "no" .Values.tls.authClients | quote }} + - name: REDIS_TLS_CERT_FILE + value: {{ template "redis.tlsCert" . }} + - name: REDIS_TLS_KEY_FILE + value: {{ template "redis.tlsCertKey" . }} + - name: REDIS_TLS_CA_FILE + value: {{ template "redis.tlsCACert" . }} + {{- if .Values.tls.dhParamsFilename }} + - name: REDIS_TLS_DH_PARAMS_FILE + value: {{ template "redis.tlsDHParams" . 
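
The env block above switches to the REDIS_TLS_* variables when .Values.tls.enabled is true; the certificate paths come from the redis.tlsCert / tlsCertKey / tlsCACert helpers and the mounted secret from redis.tlsSecretName, both defined outside this excerpt. A sketch of the values involved -- the secret/filename keys at the bottom are assumptions about how those helpers are usually wired, not something visible in this diff:

tls:
  enabled: true
  authClients: true                 # REDIS_TLS_AUTH_CLIENTS=yes, clients must present a certificate
  dhParamsFilename: dhparam.pem     # optional; only then is REDIS_TLS_DH_PARAMS_FILE set
  # Assumed keys for the tls helpers (not shown in this excerpt):
  existingSecret: redis-tls         # secret mounted read-only at /opt/bitnami/redis/certs
  certFilename: tls.crt
  certKeyFilename: tls.key
  certCAFilename: ca.crt
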
}} + {{- end }} + {{- else }} + - name: REDIS_PORT + value: {{ .Values.master.containerPorts.redis | quote }} + {{- end }} + {{- if .Values.master.extraEnvVars }} + {{- include "common.tplvalues.render" (dict "value" .Values.master.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + {{- if or .Values.master.extraEnvVarsCM .Values.master.extraEnvVarsSecret }} + envFrom: + {{- if .Values.master.extraEnvVarsCM }} + - configMapRef: + name: {{ .Values.master.extraEnvVarsCM }} + {{- end }} + {{- if .Values.master.extraEnvVarsSecret }} + - secretRef: + name: {{ .Values.master.extraEnvVarsSecret }} + {{- end }} + {{- end }} + ports: + - name: redis + containerPort: {{ .Values.master.containerPorts.redis }} + {{- if not .Values.diagnosticMode.enabled }} + {{- if .Values.master.customStartupProbe }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.master.customStartupProbe "context" $) | nindent 12 }} + {{- else if .Values.master.startupProbe.enabled }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.master.startupProbe "enabled") "context" $) | nindent 12 }} + tcpSocket: + port: redis + {{- end }} + {{- if .Values.master.customLivenessProbe }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.master.customLivenessProbe "context" $) | nindent 12 }} + {{- else if .Values.master.livenessProbe.enabled }} + livenessProbe: + initialDelaySeconds: {{ .Values.master.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.master.livenessProbe.periodSeconds }} + # One second longer than command timeout should prevent generation of zombie processes. + timeoutSeconds: {{ add1 .Values.master.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.master.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.master.livenessProbe.failureThreshold }} + exec: + command: + - sh + - -c + - /health/ping_liveness_local.sh {{ .Values.master.livenessProbe.timeoutSeconds }} + {{- end }} + {{- if .Values.master.customReadinessProbe }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.master.customReadinessProbe "context" $) | nindent 12 }} + {{- else if .Values.master.readinessProbe.enabled }} + readinessProbe: + initialDelaySeconds: {{ .Values.master.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.master.readinessProbe.periodSeconds }} + timeoutSeconds: {{ add1 .Values.master.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.master.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.master.readinessProbe.failureThreshold }} + exec: + command: + - sh + - -c + - /health/ping_readiness_local.sh {{ .Values.master.readinessProbe.timeoutSeconds }} + {{- end }} + {{- end }} + {{- if .Values.master.resources }} + resources: {{- toYaml .Values.master.resources | nindent 12 }} + {{- end }} + volumeMounts: + - name: start-scripts + mountPath: /opt/bitnami/scripts/start-scripts + - name: health + mountPath: /health + {{- if .Values.auth.usePasswordFiles }} + - name: redis-password + mountPath: /opt/bitnami/redis/secrets/ + {{- end }} + - name: redis-data + mountPath: {{ .Values.master.persistence.path }} + {{- if .Values.master.persistence.subPath }} + subPath: {{ .Values.master.persistence.subPath }} + {{- else if .Values.master.persistence.subPathExpr }} + subPathExpr: {{ .Values.master.persistence.subPathExpr }} + {{- end }} + - name: config + mountPath: /opt/bitnami/redis/mounted-etc + - name: redis-tmp-conf + mountPath: 
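
Both probes above exec the health scripts with the probe's own timeoutSeconds as the script argument, while the Kubernetes timeoutSeconds field is rendered as add1 of that value, so the kubelet never kills the shell before the script's own `timeout` fires. A values sketch for tuning the probes and the container resources:

master:
  startupProbe:
    enabled: false
  livenessProbe:
    enabled: true
    initialDelaySeconds: 20
    periodSeconds: 5
    timeoutSeconds: 5        # passed to ping_liveness_local.sh; the kubelet timeout becomes 6
    successThreshold: 1
    failureThreshold: 5
  readinessProbe:
    enabled: true
    initialDelaySeconds: 20
    periodSeconds: 5
    timeoutSeconds: 1        # passed to ping_readiness_local.sh; the kubelet timeout becomes 2
    successThreshold: 1
    failureThreshold: 5
  resources:
    requests:
      cpu: 100m
      memory: 256Mi
    limits:
      memory: 512Mi
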
/opt/bitnami/redis/etc/ + - name: tmp + mountPath: /tmp + {{- if .Values.tls.enabled }} + - name: redis-certificates + mountPath: /opt/bitnami/redis/certs + readOnly: true + {{- end }} + {{- if .Values.master.extraVolumeMounts }} + {{- include "common.tplvalues.render" ( dict "value" .Values.master.extraVolumeMounts "context" $ ) | nindent 12 }} + {{- end }} + {{- if .Values.metrics.enabled }} + - name: metrics + image: {{ include "redis.metrics.image" . }} + imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }} + {{- if .Values.metrics.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.metrics.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + {{- else }} + command: + - /bin/bash + - -c + - | + if [[ -f '/secrets/redis-password' ]]; then + export REDIS_PASSWORD=$(cat /secrets/redis-password) + fi + redis_exporter{{- range $key, $value := .Values.metrics.extraArgs }} --{{ $key }}={{ $value }}{{- end }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- end }} + env: + - name: REDIS_ALIAS + value: {{ template "common.names.fullname" . }} + {{- if .Values.auth.enabled }} + - name: REDIS_USER + value: default + {{- if (not .Values.auth.usePasswordFiles) }} + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . }} + {{- end }} + {{- end }} + {{- if .Values.tls.enabled }} + - name: REDIS_ADDR + value: rediss://{{ .Values.metrics.redisTargetHost }}:{{ .Values.master.containerPorts.redis }} + {{- if .Values.tls.authClients }} + - name: REDIS_EXPORTER_TLS_CLIENT_KEY_FILE + value: {{ template "redis.tlsCertKey" . }} + - name: REDIS_EXPORTER_TLS_CLIENT_CERT_FILE + value: {{ template "redis.tlsCert" . }} + {{- end }} + - name: REDIS_EXPORTER_TLS_CA_CERT_FILE + value: {{ template "redis.tlsCACert" . 
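
The metrics sidecar above wraps redis_exporter: it reads the password from /secrets/redis-password when password files are in use and appends every entry of .Values.metrics.extraArgs as a --key=value flag. A values sketch (the flag shown is a redis_exporter option given as an example):

metrics:
  enabled: true
  extraArgs:
    check-keys: "sessions:*"     # rendered as --check-keys=sessions:* (example exporter flag)
  redisTargetHost: localhost     # only used to build REDIS_ADDR when TLS is enabled
  resources:
    requests:
      cpu: 50m
      memory: 64Mi
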
}} + {{- end }} + {{- if .Values.metrics.extraEnvVars }} + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + ports: + - name: metrics + containerPort: 9121 + {{- if .Values.metrics.resources }} + resources: {{- toYaml .Values.metrics.resources | nindent 12 }} + {{- end }} + volumeMounts: + {{- if .Values.auth.usePasswordFiles }} + - name: redis-password + mountPath: /secrets/ + {{- end }} + {{- if .Values.tls.enabled }} + - name: redis-certificates + mountPath: /opt/bitnami/redis/certs + readOnly: true + {{- end }} + {{- if .Values.metrics.extraVolumeMounts }} + {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.extraVolumeMounts "context" $ ) | nindent 12 }} + {{- end }} + {{- end }} + {{- if .Values.master.sidecars }} + {{- include "common.tplvalues.render" (dict "value" .Values.master.sidecars "context" $) | nindent 8 }} + {{- end }} + {{- $needsVolumePermissions := and .Values.volumePermissions.enabled .Values.master.persistence.enabled .Values.master.podSecurityContext.enabled .Values.master.containerSecurityContext.enabled }} + {{- if or .Values.master.initContainers $needsVolumePermissions .Values.sysctl.enabled }} + initContainers: + {{- if .Values.master.initContainers }} + {{- include "common.tplvalues.render" (dict "value" .Values.master.initContainers "context" $) | nindent 8 }} + {{- end }} + {{- if $needsVolumePermissions }} + - name: volume-permissions + image: {{ include "redis.volumePermissions.image" . }} + imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} + command: + - /bin/bash + - -ec + - | + {{- if eq ( toString ( .Values.volumePermissions.containerSecurityContext.runAsUser )) "auto" }} + chown -R `id -u`:`id -G | cut -d " " -f2` {{ .Values.master.persistence.path }} + {{- else }} + chown -R {{ .Values.master.containerSecurityContext.runAsUser }}:{{ .Values.master.podSecurityContext.fsGroup }} {{ .Values.master.persistence.path }} + {{- end }} + {{- if eq ( toString ( .Values.volumePermissions.containerSecurityContext.runAsUser )) "auto" }} + securityContext: {{- omit .Values.volumePermissions.containerSecurityContext "runAsUser" | toYaml | nindent 12 }} + {{- else }} + securityContext: {{- .Values.volumePermissions.containerSecurityContext | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.volumePermissions.resources }} + resources: {{- toYaml .Values.volumePermissions.resources | nindent 12 }} + {{- end }} + volumeMounts: + - name: redis-data + mountPath: {{ .Values.master.persistence.path }} + {{- if .Values.master.persistence.subPath }} + subPath: {{ .Values.master.persistence.subPath }} + {{- else if .Values.master.persistence.subPathExpr }} + subPathExpr: {{ .Values.master.persistence.subPathExpr }} + {{- end }} + {{- end }} + {{- if .Values.sysctl.enabled }} + - name: init-sysctl + image: {{ include "redis.sysctl.image" . 
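
The volume-permissions init container above only renders when volumePermissions.enabled, master.persistence.enabled, and both master security contexts are enabled; with runAsUser set to the literal string "auto" it chowns the data directory to the runtime UID/GID instead of the fixed IDs. A minimal sketch:

volumePermissions:
  enabled: true
  containerSecurityContext:
    runAsUser: 0        # or the string "auto" to chown to the detected uid/gid
  resources: {}
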
}} + imagePullPolicy: {{ default "" .Values.sysctl.image.pullPolicy | quote }} + securityContext: + privileged: true + runAsUser: 0 + {{- if .Values.sysctl.command }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.sysctl.command "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.sysctl.resources }} + resources: {{- toYaml .Values.sysctl.resources | nindent 12 }} + {{- end }} + {{- if .Values.sysctl.mountHostSys }} + volumeMounts: + - name: host-sys + mountPath: /host-sys + {{- end }} + {{- end }} + {{- end }} + volumes: + - name: start-scripts + configMap: + name: {{ printf "%s-scripts" (include "common.names.fullname" .) }} + defaultMode: 0755 + - name: health + configMap: + name: {{ printf "%s-health" (include "common.names.fullname" .) }} + defaultMode: 0755 + {{- if .Values.auth.usePasswordFiles }} + - name: redis-password + secret: + secretName: {{ template "redis.secretName" . }} + items: + - key: {{ template "redis.secretPasswordKey" . }} + path: redis-password + {{- end }} + - name: config + configMap: + name: {{ include "redis.configmapName" . }} + {{- if .Values.sysctl.mountHostSys }} + - name: host-sys + hostPath: + path: /sys + {{- end }} + - name: redis-tmp-conf + {{- if .Values.master.persistence.medium }} + emptyDir: + medium: {{ .Values.master.persistence.medium | quote }} + {{- if .Values.master.persistence.sizeLimit }} + sizeLimit: {{ .Values.master.persistence.sizeLimit | quote }} + {{- end }} + {{- else }} + emptyDir: {} + {{- end }} + - name: tmp + {{- if .Values.master.persistence.medium }} + emptyDir: + medium: {{ .Values.master.persistence.medium | quote }} + {{- if .Values.master.persistence.sizeLimit }} + sizeLimit: {{ .Values.master.persistence.sizeLimit | quote }} + {{- end }} + {{- else }} + emptyDir: {} + {{- end }} + {{- if .Values.tls.enabled }} + - name: redis-certificates + secret: + secretName: {{ include "redis.tlsSecretName" . }} + defaultMode: 256 + {{- end }} + {{- if .Values.master.extraVolumes }} + {{- include "common.tplvalues.render" ( dict "value" .Values.master.extraVolumes "context" $ ) | nindent 8 }} + {{- end }} + {{- if .Values.metrics.extraVolumes }} + {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.extraVolumes "context" $ ) | nindent 8 }} + {{- end }} + {{- if not .Values.master.persistence.enabled }} + - name: redis-data + {{- if .Values.master.persistence.medium }} + emptyDir: { + medium: {{ .Values.master.persistence.medium | quote }} + } + {{- else }} + emptyDir: {} + {{- end }} + {{- else if .Values.master.persistence.existingClaim }} + - name: redis-data + persistentVolumeClaim: + claimName: {{ printf "%s" (tpl .Values.master.persistence.existingClaim .) }} + {{- else if (eq .Values.master.kind "Deployment") }} + - name: redis-data + persistentVolumeClaim: + claimName: {{ printf "redis-data-%s-master" (include "common.names.fullname" .) }} + {{- else }} + volumeClaimTemplates: + - metadata: + name: redis-data + labels: {{- include "common.labels.matchLabels" . | nindent 10 }} + app.kubernetes.io/component: master + {{- if .Values.master.persistence.annotations }} + annotations: {{- toYaml .Values.master.persistence.annotations | nindent 10 }} + {{- end }} + spec: + accessModes: + {{- range .Values.master.persistence.accessModes }} + - {{ . 
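
The volumes section above picks, in order: an emptyDir when persistence is disabled, an existing claim when one is named, a chart-managed PVC when master.kind is Deployment, and otherwise a volumeClaimTemplate on the StatefulSet. A values sketch covering the common cases (size and class are illustrative; storageClass is resolved through the common.storage.class helper):

master:
  persistence:
    enabled: true
    path: /data
    size: 8Gi
    accessModes:
      - ReadWriteOnce
    # storageClass: fast-ssd        # assumed key consumed by common.storage.class
    # existingClaim: redis-master   # short-circuits the volumeClaimTemplate
    # medium: Memory                # also applied to the redis-tmp-conf and tmp emptyDirs
    # sizeLimit: 1Gi
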
| quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.master.persistence.size | quote }} + {{- if .Values.master.persistence.selector }} + selector: {{- include "common.tplvalues.render" (dict "value" .Values.master.persistence.selector "context" $) | nindent 10 }} + {{- end }} + {{- if .Values.master.persistence.dataSource }} + dataSource: {{- include "common.tplvalues.render" (dict "value" .Values.master.persistence.dataSource "context" $) | nindent 10 }} + {{- end }} + {{- include "common.storage.class" (dict "persistence" .Values.master.persistence "global" .Values.global) | nindent 8 }} + {{- end }} +{{- end }} diff --git a/charts/modules/redis/templates/master/psp.yaml b/charts/modules/redis/templates/master/psp.yaml new file mode 100644 index 0000000..2ba93b6 --- /dev/null +++ b/charts/modules/redis/templates/master/psp.yaml @@ -0,0 +1,46 @@ +{{- $pspAvailable := (semverCompare "<1.25-0" (include "common.capabilities.kubeVersion" .)) -}} +{{- if and $pspAvailable .Values.podSecurityPolicy.create }} +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ printf "%s-master" (include "common.names.fullname" .) }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + allowPrivilegeEscalation: false + fsGroup: + rule: 'MustRunAs' + ranges: + - min: {{ .Values.master.podSecurityContext.fsGroup }} + max: {{ .Values.master.podSecurityContext.fsGroup }} + hostIPC: false + hostNetwork: false + hostPID: false + privileged: false + readOnlyRootFilesystem: false + requiredDropCapabilities: + - ALL + runAsUser: + rule: 'MustRunAs' + ranges: + - min: {{ .Values.master.containerSecurityContext.runAsUser }} + max: {{ .Values.master.containerSecurityContext.runAsUser }} + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + - min: {{ .Values.master.containerSecurityContext.runAsUser }} + max: {{ .Values.master.containerSecurityContext.runAsUser }} + volumes: + - 'configMap' + - 'secret' + - 'emptyDir' + - 'persistentVolumeClaim' +{{- end }} diff --git a/charts/modules/redis/templates/master/pvc.yaml b/charts/modules/redis/templates/master/pvc.yaml new file mode 100644 index 0000000..e5fddb0 --- /dev/null +++ b/charts/modules/redis/templates/master/pvc.yaml @@ -0,0 +1,27 @@ +{{- if and (eq .Values.architecture "standalone") (eq .Values.master.kind "Deployment") (.Values.master.persistence.enabled) (not .Values.master.persistence.existingClaim) }} +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: {{ printf "redis-data-%s-master" (include "common.names.fullname" .) }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.matchLabels" . | nindent 4 }} + app.kubernetes.io/component: master + {{- if .Values.master.persistence.annotations }} + annotations: {{- toYaml .Values.master.persistence.annotations | nindent 4 }} + {{- end }} +spec: + accessModes: + {{- range .Values.master.persistence.accessModes }} + - {{ . 
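
master/psp.yaml above is double-gated: the PodSecurityPolicy API must still exist (kube version matched with "<1.25-0" via common.capabilities.kubeVersion) and podSecurityPolicy.create must be true; its UID and fsGroup ranges are copied from the master security contexts. A sketch of the values it reads:

podSecurityPolicy:
  create: true            # ignored on Kubernetes >= 1.25, where PSP no longer exists
master:
  podSecurityContext:
    enabled: true
    fsGroup: 1001         # becomes the fsGroup/supplementalGroups MustRunAs range
  containerSecurityContext:
    enabled: true
    runAsUser: 1001       # becomes the runAsUser MustRunAs range
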
| quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.master.persistence.size | quote }} + {{- if .Values.master.persistence.selector }} + selector: {{- include "common.tplvalues.render" (dict "value" .Values.master.persistence.selector "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.master.persistence.dataSource }} + dataSource: {{- include "common.tplvalues.render" (dict "value" .Values.master.persistence.dataSource "context" $) | nindent 4 }} + {{- end }} + {{- include "common.storage.class" (dict "persistence" .Values.master.persistence "global" .Values.global) | nindent 2 }} +{{- end }} diff --git a/charts/modules/redis/templates/master/service.yaml b/charts/modules/redis/templates/master/service.yaml new file mode 100644 index 0000000..92b513a --- /dev/null +++ b/charts/modules/redis/templates/master/service.yaml @@ -0,0 +1,58 @@ +{{- if not .Values.sentinel.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ printf "%s-master" (include "common.names.fullname" .) }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: master + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if or .Values.master.service.annotations .Values.commonAnnotations }} + annotations: + {{- if .Values.master.service.annotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.master.service.annotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} +spec: + type: {{ .Values.master.service.type }} + {{- if or (eq .Values.master.service.type "LoadBalancer") (eq .Values.master.service.type "NodePort") }} + externalTrafficPolicy: {{ .Values.master.service.externalTrafficPolicy | quote }} + {{- end }} + {{- if (semverCompare ">=1.22-0" (include "common.capabilities.kubeVersion" .)) }} + internalTrafficPolicy: {{ .Values.master.service.internalTrafficPolicy }} + {{- end }} + {{- if and (eq .Values.master.service.type "LoadBalancer") (not (empty .Values.master.service.loadBalancerIP)) }} + loadBalancerIP: {{ .Values.master.service.loadBalancerIP }} + {{- end }} + {{- if and (eq .Values.master.service.type "LoadBalancer") (not (empty .Values.master.service.loadBalancerSourceRanges)) }} + loadBalancerSourceRanges: {{ .Values.master.service.loadBalancerSourceRanges }} + {{- end }} + {{- if and .Values.master.service.clusterIP (eq .Values.master.service.type "ClusterIP") }} + clusterIP: {{ .Values.master.service.clusterIP }} + {{- end }} + {{- if .Values.master.service.sessionAffinity }} + sessionAffinity: {{ .Values.master.service.sessionAffinity }} + {{- end }} + {{- if .Values.master.service.sessionAffinityConfig }} + sessionAffinityConfig: {{- include "common.tplvalues.render" (dict "value" .Values.master.service.sessionAffinityConfig "context" $) | nindent 4 }} + {{- end }} + ports: + - name: tcp-redis + port: {{ .Values.master.service.ports.redis }} + targetPort: redis + {{- if and (or (eq .Values.master.service.type "NodePort") (eq .Values.master.service.type "LoadBalancer")) .Values.master.service.nodePorts.redis}} + nodePort: {{ .Values.master.service.nodePorts.redis}} + {{- else if eq .Values.master.service.type "ClusterIP" }} + nodePort: null + {{- end }} + {{- if .Values.master.service.extraPorts 
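
master/service.yaml above supports ClusterIP, NodePort and LoadBalancer, emitting externalTrafficPolicy, nodePort and the loadBalancer fields only for the types that accept them. A values sketch exposing the master externally (address and CIDR are placeholders):

master:
  service:
    type: LoadBalancer
    ports:
      redis: 6379
    loadBalancerIP: 203.0.113.10        # placeholder address
    loadBalancerSourceRanges:
      - 10.0.0.0/8
    externalTrafficPolicy: Local
    # NodePort alternative:
    # type: NodePort
    # nodePorts:
    #   redis: 30079
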
}} + {{- include "common.tplvalues.render" (dict "value" .Values.master.service.extraPorts "context" $) | nindent 4 }} + {{- end }} + selector: {{- include "common.labels.matchLabels" . | nindent 4 }} + app.kubernetes.io/component: master +{{- end }} diff --git a/charts/modules/redis/templates/master/serviceaccount.yaml b/charts/modules/redis/templates/master/serviceaccount.yaml new file mode 100644 index 0000000..9c62e5f --- /dev/null +++ b/charts/modules/redis/templates/master/serviceaccount.yaml @@ -0,0 +1,21 @@ +{{- if .Values.master.serviceAccount.create }} +apiVersion: v1 +kind: ServiceAccount +automountServiceAccountToken: {{ .Values.master.serviceAccount.automountServiceAccountToken }} +metadata: + name: {{ template "redis.masterServiceAccountName" . }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if or .Values.commonAnnotations .Values.master.serviceAccount.annotations }} + annotations: + {{- if or .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.master.serviceAccount.annotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.master.serviceAccount.annotations "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} +{{- end }} diff --git a/charts/modules/redis/templates/metrics-svc.yaml b/charts/modules/redis/templates/metrics-svc.yaml new file mode 100644 index 0000000..13c552f --- /dev/null +++ b/charts/modules/redis/templates/metrics-svc.yaml @@ -0,0 +1,41 @@ +{{- if .Values.metrics.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ printf "%s-metrics" (include "common.names.fullname" .) }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + app.kubernetes.io/component: metrics + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if or .Values.metrics.service.annotations .Values.commonAnnotations }} + annotations: + {{- if .Values.metrics.service.annotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.service.annotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} +spec: + type: {{ .Values.metrics.service.type }} + {{- if eq .Values.metrics.service.type "LoadBalancer" }} + externalTrafficPolicy: {{ .Values.metrics.service.externalTrafficPolicy }} + {{- end }} + {{- if and (eq .Values.metrics.service.type "LoadBalancer") .Values.metrics.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.metrics.service.loadBalancerIP }} + {{- end }} + {{- if and (eq .Values.metrics.service.type "LoadBalancer") .Values.metrics.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: {{- toYaml .Values.metrics.service.loadBalancerSourceRanges | nindent 4 }} + {{- end }} + ports: + - name: http-metrics + port: {{ .Values.metrics.service.port }} + protocol: TCP + targetPort: metrics + {{- if .Values.metrics.service.extraPorts }} + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.service.extraPorts "context" $) | nindent 4 }} + {{- end }} + selector: {{- include "common.labels.matchLabels" . | nindent 4 }} +{{- end }} diff --git a/charts/modules/redis/templates/networkpolicy.yaml b/charts/modules/redis/templates/networkpolicy.yaml new file mode 100644 index 0000000..7205cea --- /dev/null +++ b/charts/modules/redis/templates/networkpolicy.yaml @@ -0,0 +1,82 @@ +{{- if .Values.networkPolicy.enabled }} +kind: NetworkPolicy +apiVersion: {{ template "networkPolicy.apiVersion" . }} +metadata: + name: {{ template "common.names.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + podSelector: + matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }} + policyTypes: + - Ingress + {{- if or (eq .Values.architecture "replication") .Values.networkPolicy.extraEgress }} + - Egress + egress: + {{- if eq .Values.architecture "replication" }} + # Allow dns resolution + - ports: + - port: 53 + protocol: UDP + # Allow outbound connections to other cluster pods + - ports: + - port: {{ .Values.master.containerPorts.redis }} + {{- if .Values.sentinel.enabled }} + - port: {{ .Values.sentinel.containerPorts.sentinel }} + {{- end }} + to: + - podSelector: + matchLabels: {{- include "common.labels.matchLabels" . 
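
The metrics Service above exposes the exporter on the http-metrics port; its annotations are a typical place to hang Prometheus scrape hints when no ServiceMonitor is used. A sketch -- the prometheus.io/* annotation names are a Prometheus convention, not something this chart requires:

metrics:
  service:
    type: ClusterIP
    port: 9121
    annotations:
      prometheus.io/scrape: "true"   # conventional scrape hint; optional
      prometheus.io/port: "9121"
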
| nindent 14 }} + {{- end }} + {{- if .Values.networkPolicy.extraEgress }} + {{- include "common.tplvalues.render" ( dict "value" .Values.networkPolicy.extraEgress "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} + ingress: + # Allow inbound connections + - ports: + - port: {{ .Values.master.containerPorts.redis }} + {{- if .Values.sentinel.enabled }} + - port: {{ .Values.sentinel.containerPorts.sentinel }} + {{- end }} + {{- if not .Values.networkPolicy.allowExternal }} + from: + - podSelector: + matchLabels: + {{ template "common.names.fullname" . }}-client: "true" + - podSelector: + matchLabels: {{- include "common.labels.matchLabels" . | nindent 14 }} + {{- if or .Values.networkPolicy.ingressNSMatchLabels .Values.networkPolicy.ingressNSPodMatchLabels }} + - namespaceSelector: + matchLabels: + {{- if .Values.networkPolicy.ingressNSMatchLabels }} + {{- range $key, $value := .Values.networkPolicy.ingressNSMatchLabels }} + {{ $key | quote }}: {{ $value | quote }} + {{- end }} + {{ else }} + {} + {{- end }} + {{- if .Values.networkPolicy.ingressNSPodMatchLabels }} + podSelector: + matchLabels: + {{- range $key, $value := .Values.networkPolicy.ingressNSPodMatchLabels }} + {{ $key | quote }}: {{ $value | quote }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- if .Values.metrics.enabled }} + # Allow prometheus scrapes for metrics + - ports: + - port: 9121 + {{- end }} + {{- if .Values.networkPolicy.extraIngress }} + {{- include "common.tplvalues.render" ( dict "value" .Values.networkPolicy.extraIngress "context" $ ) | nindent 4 }} + {{- end }} +{{- end }} diff --git a/charts/modules/redis/templates/pdb.yaml b/charts/modules/redis/templates/pdb.yaml new file mode 100644 index 0000000..f82d278 --- /dev/null +++ b/charts/modules/redis/templates/pdb.yaml @@ -0,0 +1,23 @@ +{{- if .Values.pdb.create }} +apiVersion: {{ include "common.capabilities.policy.apiVersion" . }} +kind: PodDisruptionBudget +metadata: + name: {{ template "common.names.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + {{- if .Values.pdb.minAvailable }} + minAvailable: {{ .Values.pdb.minAvailable }} + {{- end }} + {{- if .Values.pdb.maxUnavailable }} + maxUnavailable: {{ .Values.pdb.maxUnavailable }} + {{- end }} + selector: + matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }} +{{- end }} diff --git a/charts/modules/redis/templates/prometheusrule.yaml b/charts/modules/redis/templates/prometheusrule.yaml new file mode 100644 index 0000000..b89d116 --- /dev/null +++ b/charts/modules/redis/templates/prometheusrule.yaml @@ -0,0 +1,21 @@ +{{- if and .Values.metrics.enabled .Values.metrics.prometheusRule.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: {{ template "common.names.fullname" . }} + namespace: {{ default .Release.Namespace .Values.metrics.prometheusRule.namespace | quote }} + labels: {{- include "common.labels.standard" . 
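
The NetworkPolicy above only restricts ingress when networkPolicy.allowExternal is false, in which case clients must carry the <fullname>-client=true label, be chart pods themselves, or match the namespace/pod selectors; the PodDisruptionBudget simply forwards minAvailable/maxUnavailable. A combined values sketch (label keys are examples):

networkPolicy:
  enabled: true
  allowExternal: false
  ingressNSMatchLabels:
    kubernetes.io/metadata.name: backend   # example namespace label
  ingressNSPodMatchLabels:
    app: api                               # example pod label
pdb:
  create: true
  minAvailable: 1
  # maxUnavailable: 1   # alternative to minAvailable
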
| nindent 4 }} + {{- if .Values.metrics.prometheusRule.additionalLabels }} + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.prometheusRule.additionalLabels "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + groups: + - name: {{ include "common.names.fullname" . }} + rules: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.prometheusRule.rules "context" $ ) | nindent 8 }} +{{- end }} diff --git a/charts/modules/redis/templates/replicas/hpa.yaml b/charts/modules/redis/templates/replicas/hpa.yaml new file mode 100644 index 0000000..ea069a8 --- /dev/null +++ b/charts/modules/redis/templates/replicas/hpa.yaml @@ -0,0 +1,47 @@ +{{- if and .Values.replica.autoscaling.enabled (not .Values.sentinel.enabled) }} +apiVersion: {{ include "common.capabilities.hpa.apiVersion" ( dict "context" $ ) }} +kind: HorizontalPodAutoscaler +metadata: + name: {{ printf "%s-replicas" (include "common.names.fullname" .) }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: replica + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" (dict "value" .Values.commonLabels "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + scaleTargetRef: + apiVersion: {{ include "common.capabilities.deployment.apiVersion" . }} + kind: StatefulSet + name: {{ printf "%s-replicas" (include "common.names.fullname" .) }} + minReplicas: {{ .Values.replica.autoscaling.minReplicas }} + maxReplicas: {{ .Values.replica.autoscaling.maxReplicas }} + metrics: + {{- if .Values.replica.autoscaling.targetMemory }} + - type: Resource + resource: + name: memory + {{- if semverCompare "<1.23-0" (include "common.capabilities.kubeVersion" .) }} + targetAverageUtilization: {{ .Values.replica.autoscaling.targetMemory }} + {{- else }} + target: + type: Utilization + averageUtilization: {{ .Values.replica.autoscaling.targetMemory }} + {{- end }} + {{- end }} + {{- if .Values.replica.autoscaling.targetCPU }} + - type: Resource + resource: + name: cpu + {{- if semverCompare "<1.23-0" (include "common.capabilities.kubeVersion" .) }} + targetAverageUtilization: {{ .Values.replica.autoscaling.targetCPU }} + {{- else }} + target: + type: Utilization + averageUtilization: {{ .Values.replica.autoscaling.targetCPU }} + {{- end }} + {{- end }} +{{- end }} diff --git a/charts/modules/redis/templates/replicas/service.yaml b/charts/modules/redis/templates/replicas/service.yaml new file mode 100644 index 0000000..f261926 --- /dev/null +++ b/charts/modules/redis/templates/replicas/service.yaml @@ -0,0 +1,58 @@ +{{- if and (eq .Values.architecture "replication") (not .Values.sentinel.enabled) }} +apiVersion: v1 +kind: Service +metadata: + name: {{ printf "%s-replicas" (include "common.names.fullname" .) }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . 
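
prometheusrule.yaml above renders .Values.metrics.prometheusRule.rules into a single rule group, and the HPA targets the replicas StatefulSet whenever Sentinel is off. A values sketch with one illustrative alert -- redis_up is a gauge exposed by redis_exporter; the expression, thresholds and label are examples:

metrics:
  prometheusRule:
    enabled: true
    additionalLabels:
      release: kube-prometheus-stack   # example label your Prometheus instance selects on
    rules:
      - alert: RedisDown
        expr: redis_up == 0            # example expression
        for: 5m
        labels:
          severity: critical
        annotations:
          summary: Redis instance is down
replica:
  autoscaling:
    enabled: true
    minReplicas: 2
    maxReplicas: 5
    targetCPU: 70          # rendered as target.averageUtilization (targetAverageUtilization before 1.23)
    # targetMemory: 80
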
| nindent 4 }} + app.kubernetes.io/component: replica + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if or .Values.replica.service.annotations .Values.commonAnnotations }} + annotations: + {{- if .Values.replica.service.annotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.replica.service.annotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} +spec: + type: {{ .Values.replica.service.type }} + {{- if or (eq .Values.replica.service.type "LoadBalancer") (eq .Values.replica.service.type "NodePort") }} + externalTrafficPolicy: {{ .Values.replica.service.externalTrafficPolicy | quote }} + {{- end }} + {{- if (semverCompare ">=1.22-0" (include "common.capabilities.kubeVersion" .)) }} + internalTrafficPolicy: {{ .Values.replica.service.internalTrafficPolicy }} + {{- end }} + {{- if and (eq .Values.replica.service.type "LoadBalancer") (not (empty .Values.replica.service.loadBalancerIP)) }} + loadBalancerIP: {{ .Values.replica.service.loadBalancerIP }} + {{- end }} + {{- if and (eq .Values.replica.service.type "LoadBalancer") (not (empty .Values.replica.service.loadBalancerSourceRanges)) }} + loadBalancerSourceRanges: {{ .Values.replica.service.loadBalancerSourceRanges }} + {{- end }} + {{- if and .Values.replica.service.clusterIP (eq .Values.replica.service.type "ClusterIP") }} + clusterIP: {{ .Values.replica.service.clusterIP }} + {{- end }} + {{- if .Values.replica.service.sessionAffinity }} + sessionAffinity: {{ .Values.replica.service.sessionAffinity }} + {{- end }} + {{- if .Values.replica.service.sessionAffinityConfig }} + sessionAffinityConfig: {{- include "common.tplvalues.render" (dict "value" .Values.replica.service.sessionAffinityConfig "context" $) | nindent 4 }} + {{- end }} + ports: + - name: tcp-redis + port: {{ .Values.replica.service.ports.redis }} + targetPort: redis + {{- if and (or (eq .Values.replica.service.type "NodePort") (eq .Values.replica.service.type "LoadBalancer")) .Values.replica.service.nodePorts.redis}} + nodePort: {{ .Values.replica.service.nodePorts.redis}} + {{- else if eq .Values.replica.service.type "ClusterIP" }} + nodePort: null + {{- end }} + {{- if .Values.replica.service.extraPorts }} + {{- include "common.tplvalues.render" (dict "value" .Values.replica.service.extraPorts "context" $) | nindent 4 }} + {{- end }} + selector: {{- include "common.labels.matchLabels" . | nindent 4 }} + app.kubernetes.io/component: replica +{{- end }} diff --git a/charts/modules/redis/templates/replicas/serviceaccount.yaml b/charts/modules/redis/templates/replicas/serviceaccount.yaml new file mode 100644 index 0000000..333ec9f --- /dev/null +++ b/charts/modules/redis/templates/replicas/serviceaccount.yaml @@ -0,0 +1,21 @@ +{{- if .Values.replica.serviceAccount.create }} +apiVersion: v1 +kind: ServiceAccount +automountServiceAccountToken: {{ .Values.replica.serviceAccount.automountServiceAccountToken }} +metadata: + name: {{ template "redis.replicaServiceAccountName" . }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if or .Values.commonAnnotations .Values.replica.serviceAccount.annotations }} + annotations: + {{- if or .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.replica.serviceAccount.annotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.replica.serviceAccount.annotations "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} +{{- end }} diff --git a/charts/modules/redis/templates/replicas/statefulset.yaml b/charts/modules/redis/templates/replicas/statefulset.yaml new file mode 100644 index 0000000..dc52c48 --- /dev/null +++ b/charts/modules/redis/templates/replicas/statefulset.yaml @@ -0,0 +1,482 @@ +{{- if and (eq .Values.architecture "replication") (not .Values.sentinel.enabled) }} +apiVersion: {{ include "common.capabilities.statefulset.apiVersion" . }} +kind: StatefulSet +metadata: + name: {{ printf "%s-replicas" (include "common.names.fullname" .) }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: replica + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + {{- if not .Values.replica.autoscaling.enabled }} + replicas: {{ .Values.replica.replicaCount }} + {{- end }} + selector: + matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }} + app.kubernetes.io/component: replica + serviceName: {{ printf "%s-headless" (include "common.names.fullname" .) }} + {{- if .Values.replica.updateStrategy }} + updateStrategy: {{- toYaml .Values.replica.updateStrategy | nindent 4 }} + {{- end }} + {{- if .Values.replica.podManagementPolicy }} + podManagementPolicy: {{ .Values.replica.podManagementPolicy | quote }} + {{- end }} + template: + metadata: + labels: {{- include "common.labels.standard" . | nindent 8 }} + app.kubernetes.io/component: replica + {{- if .Values.replica.podLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.replica.podLabels "context" $ ) | nindent 8 }} + {{- end }} + {{- if and .Values.metrics.enabled .Values.metrics.podLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.podLabels "context" $ ) | nindent 8 }} + {{- end }} + annotations: + {{- if (include "redis.createConfigmap" .) }} + checksum/configmap: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} + {{- end }} + checksum/health: {{ include (print $.Template.BasePath "/health-configmap.yaml") . | sha256sum }} + checksum/scripts: {{ include (print $.Template.BasePath "/scripts-configmap.yaml") . | sha256sum }} + checksum/secret: {{ include (print $.Template.BasePath "/secret.yaml") . 
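
The replicas StatefulSet above only renders for architecture=replication without Sentinel, and replica.replicaCount is honoured only while autoscaling is disabled. A values sketch for its top-level knobs and the per-role ServiceAccount:

architecture: replication
replica:
  replicaCount: 3
  podManagementPolicy: Parallel     # optional; default StatefulSet ordering if unset
  updateStrategy:
    type: RollingUpdate
  serviceAccount:
    create: true
    automountServiceAccountToken: false
    annotations: {}                 # e.g. cloud IAM annotations
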
| sha256sum }} + {{- if .Values.replica.podAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.replica.podAnnotations "context" $ ) | nindent 8 }} + {{- end }} + {{- if and .Values.metrics.enabled .Values.metrics.podAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.podAnnotations "context" $ ) | nindent 8 }} + {{- end }} + spec: + {{- include "redis.imagePullSecrets" . | nindent 6 }} + {{- if .Values.replica.hostAliases }} + hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.replica.hostAliases "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.replica.podSecurityContext.enabled }} + securityContext: {{- omit .Values.replica.podSecurityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + serviceAccountName: {{ template "redis.replicaServiceAccountName" . }} + {{- if .Values.replica.priorityClassName }} + priorityClassName: {{ .Values.replica.priorityClassName | quote }} + {{- end }} + {{- if .Values.replica.affinity }} + affinity: {{- include "common.tplvalues.render" (dict "value" .Values.replica.affinity "context" $) | nindent 8 }} + {{- else }} + affinity: + podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.replica.podAffinityPreset "component" "replica" "context" $) | nindent 10 }} + podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.replica.podAntiAffinityPreset "component" "replica" "context" $) | nindent 10 }} + nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.replica.nodeAffinityPreset.type "key" .Values.replica.nodeAffinityPreset.key "values" .Values.replica.nodeAffinityPreset.values) | nindent 10 }} + {{- end }} + {{- if .Values.replica.nodeSelector }} + nodeSelector: {{- include "common.tplvalues.render" (dict "value" .Values.replica.nodeSelector "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.replica.tolerations }} + tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.replica.tolerations "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.replica.topologySpreadConstraints }} + topologySpreadConstraints: {{- include "common.tplvalues.render" (dict "value" .Values.replica.topologySpreadConstraints "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.replica.shareProcessNamespace }} + shareProcessNamespace: {{ .Values.replica.shareProcessNamespace }} + {{- end }} + {{- if .Values.replica.schedulerName }} + schedulerName: {{ .Values.replica.schedulerName | quote }} + {{- end }} + {{- if .Values.replica.dnsPolicy }} + dnsPolicy: {{ .Values.replica.dnsPolicy }} + {{- end }} + {{- if .Values.replica.dnsConfig }} + dnsConfig: {{- include "common.tplvalues.render" (dict "value" .Values.replica.dnsConfig "context" $) | nindent 8 }} + {{- end }} + terminationGracePeriodSeconds: {{ .Values.replica.terminationGracePeriodSeconds }} + containers: + - name: redis + image: {{ template "redis.image" . 
}} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if not .Values.diagnosticMode.enabled }} + {{- if .Values.replica.lifecycleHooks }} + lifecycle: {{- include "common.tplvalues.render" (dict "value" .Values.replica.lifecycleHooks "context" $) | nindent 12 }} + {{- end }} + {{- end }} + {{- if .Values.replica.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.replica.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + {{- else if .Values.replica.command }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.replica.command "context" $) | nindent 12 }} + {{- else }} + command: + - /bin/bash + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- else if .Values.replica.args }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.replica.args "context" $) | nindent 12 }} + {{- else }} + args: + - -c + - /opt/bitnami/scripts/start-scripts/start-replica.sh + {{- end }} + env: + - name: BITNAMI_DEBUG + value: {{ ternary "true" "false" (or .Values.image.debug .Values.diagnosticMode.enabled) | quote }} + - name: REDIS_REPLICATION_MODE + value: replica + - name: REDIS_MASTER_HOST + {{- if and (eq (int64 .Values.master.count) 1) (ne .Values.master.kind "Deployment") }} + value: {{ template "common.names.fullname" . }}-master-0.{{ template "common.names.fullname" . }}-headless.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} + {{- else }} + value: {{ template "common.names.fullname" . }}-master.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} + {{- end }} + - name: REDIS_MASTER_PORT_NUMBER + value: {{ .Values.master.containerPorts.redis | quote }} + - name: ALLOW_EMPTY_PASSWORD + value: {{ ternary "no" "yes" .Values.auth.enabled | quote }} + {{- if .Values.auth.enabled }} + {{- if .Values.auth.usePasswordFiles }} + - name: REDIS_PASSWORD_FILE + value: "/opt/bitnami/redis/secrets/redis-password" + - name: REDIS_MASTER_PASSWORD_FILE + value: "/opt/bitnami/redis/secrets/redis-password" + {{- else }} + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . }} + - name: REDIS_MASTER_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . }} + {{- end }} + {{- end }} + - name: REDIS_TLS_ENABLED + value: {{ ternary "yes" "no" .Values.tls.enabled | quote }} + {{- if .Values.tls.enabled }} + - name: REDIS_TLS_PORT + value: {{ .Values.replica.containerPorts.redis | quote }} + - name: REDIS_TLS_AUTH_CLIENTS + value: {{ ternary "yes" "no" .Values.tls.authClients | quote }} + - name: REDIS_TLS_CERT_FILE + value: {{ template "redis.tlsCert" . }} + - name: REDIS_TLS_KEY_FILE + value: {{ template "redis.tlsCertKey" . }} + - name: REDIS_TLS_CA_FILE + value: {{ template "redis.tlsCACert" . }} + {{- if .Values.tls.dhParamsFilename }} + - name: REDIS_TLS_DH_PARAMS_FILE + value: {{ template "redis.tlsDHParams" . 
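
The env block above points each replica at the master: with a single non-Deployment master it uses the pod-stable headless name, otherwise the master Service. Assuming common.names.fullname resolves to the hypothetical "myredis", namespace "default", the default cluster domain and the default port, the rendered section would look roughly like this sketch:

# Rendered sketch (the "myredis" fullname is hypothetical)
- name: REDIS_REPLICATION_MODE
  value: replica
- name: REDIS_MASTER_HOST
  # master.count == 1 and master.kind != "Deployment":
  value: myredis-master-0.myredis-headless.default.svc.cluster.local
  # otherwise the template renders the Service name instead:
  # value: myredis-master.default.svc.cluster.local
- name: REDIS_MASTER_PORT_NUMBER
  value: "6379"
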
}} + {{- end }} + {{- else }} + - name: REDIS_PORT + value: {{ .Values.replica.containerPorts.redis | quote }} + {{- end }} + {{- if .Values.replica.extraEnvVars }} + {{- include "common.tplvalues.render" (dict "value" .Values.replica.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + {{- if or .Values.replica.extraEnvVarsCM .Values.replica.extraEnvVarsSecret }} + envFrom: + {{- if .Values.replica.extraEnvVarsCM }} + - configMapRef: + name: {{ .Values.replica.extraEnvVarsCM }} + {{- end }} + {{- if .Values.replica.extraEnvVarsSecret }} + - secretRef: + name: {{ .Values.replica.extraEnvVarsSecret }} + {{- end }} + {{- end }} + ports: + - name: redis + containerPort: {{ .Values.replica.containerPorts.redis }} + {{- if not .Values.diagnosticMode.enabled }} + {{- if .Values.replica.customStartupProbe }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.replica.customStartupProbe "context" $) | nindent 12 }} + {{- else if .Values.replica.startupProbe.enabled }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.replica.startupProbe "enabled") "context" $) | nindent 12 }} + tcpSocket: + port: redis + {{- end }} + {{- if .Values.replica.customLivenessProbe }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.replica.customLivenessProbe "context" $) | nindent 12 }} + {{- else if .Values.replica.livenessProbe.enabled }} + livenessProbe: + initialDelaySeconds: {{ .Values.replica.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.replica.livenessProbe.periodSeconds }} + timeoutSeconds: {{ add1 .Values.replica.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.replica.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.replica.livenessProbe.failureThreshold }} + exec: + command: + - sh + - -c + - /health/ping_liveness_local_and_master.sh {{ .Values.replica.livenessProbe.timeoutSeconds }} + {{- end }} + {{- if .Values.replica.customReadinessProbe }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.replica.customReadinessProbe "context" $) | nindent 12 }} + {{- else if .Values.replica.readinessProbe.enabled }} + readinessProbe: + initialDelaySeconds: {{ .Values.replica.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.replica.readinessProbe.periodSeconds }} + timeoutSeconds: {{ add1 .Values.replica.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.replica.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.replica.readinessProbe.failureThreshold }} + exec: + command: + - sh + - -c + - /health/ping_readiness_local_and_master.sh {{ .Values.replica.readinessProbe.timeoutSeconds }} + {{- end }} + {{- end }} + {{- if .Values.replica.resources }} + resources: {{- toYaml .Values.replica.resources | nindent 12 }} + {{- end }} + volumeMounts: + - name: start-scripts + mountPath: /opt/bitnami/scripts/start-scripts + - name: health + mountPath: /health + {{- if .Values.auth.usePasswordFiles }} + - name: redis-password + mountPath: /opt/bitnami/redis/secrets/ + {{- end }} + - name: redis-data + mountPath: /data + {{- if .Values.replica.persistence.subPath }} + subPath: {{ .Values.replica.persistence.subPath }} + {{- else if .Values.replica.persistence.subPathExpr }} + subPathExpr: {{ .Values.replica.persistence.subPathExpr }} + {{- end }} + - name: config + mountPath: /opt/bitnami/redis/mounted-etc + - name: redis-tmp-conf + mountPath: /opt/bitnami/redis/etc + {{- if .Values.tls.enabled }} + - name: 
redis-certificates + mountPath: /opt/bitnami/redis/certs + readOnly: true + {{- end }} + {{- if .Values.replica.extraVolumeMounts }} + {{- include "common.tplvalues.render" ( dict "value" .Values.replica.extraVolumeMounts "context" $ ) | nindent 12 }} + {{- end }} + {{- if .Values.metrics.enabled }} + - name: metrics + image: {{ include "redis.metrics.image" . }} + imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }} + {{- if .Values.metrics.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.metrics.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + {{- else if .Values.metrics.command }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.command "context" $) | nindent 12 }} + {{- else }} + command: + - /bin/bash + - -c + - | + if [[ -f '/secrets/redis-password' ]]; then + export REDIS_PASSWORD=$(cat /secrets/redis-password) + fi + redis_exporter{{- range $key, $value := .Values.metrics.extraArgs }} --{{ $key }}={{ $value }}{{- end }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- end }} + env: + - name: REDIS_ALIAS + value: {{ template "common.names.fullname" . }} + {{- if .Values.auth.enabled }} + - name: REDIS_USER + value: default + {{- if (not .Values.auth.usePasswordFiles) }} + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . }} + {{- end }} + {{- end }} + {{- if .Values.tls.enabled }} + - name: REDIS_ADDR + value: rediss://{{ .Values.metrics.redisTargetHost }}:{{ .Values.replica.containerPorts.redis }} + {{- if .Values.tls.authClients }} + - name: REDIS_EXPORTER_TLS_CLIENT_KEY_FILE + value: {{ template "redis.tlsCertKey" . }} + - name: REDIS_EXPORTER_TLS_CLIENT_CERT_FILE + value: {{ template "redis.tlsCert" . }} + {{- end }} + - name: REDIS_EXPORTER_TLS_CA_CERT_FILE + value: {{ template "redis.tlsCACert" . 
}} + {{- end }} + {{- if .Values.metrics.extraEnvVars }} + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + ports: + - name: metrics + containerPort: 9121 + {{- if .Values.metrics.resources }} + resources: {{- toYaml .Values.metrics.resources | nindent 12 }} + {{- end }} + volumeMounts: + {{- if .Values.auth.usePasswordFiles }} + - name: redis-password + mountPath: /secrets/ + {{- end }} + {{- if .Values.tls.enabled }} + - name: redis-certificates + mountPath: /opt/bitnami/redis/certs + readOnly: true + {{- end }} + {{- if .Values.metrics.extraVolumeMounts }} + {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.extraVolumeMounts "context" $ ) | nindent 12 }} + {{- end }} + {{- end }} + {{- if .Values.replica.sidecars }} + {{- include "common.tplvalues.render" (dict "value" .Values.replica.sidecars "context" $) | nindent 8 }} + {{- end }} + {{- $needsVolumePermissions := and .Values.volumePermissions.enabled .Values.replica.persistence.enabled .Values.replica.podSecurityContext.enabled .Values.replica.containerSecurityContext.enabled }} + {{- if or .Values.replica.initContainers $needsVolumePermissions .Values.sysctl.enabled }} + initContainers: + {{- if .Values.replica.initContainers }} + {{- include "common.tplvalues.render" (dict "value" .Values.replica.initContainers "context" $) | nindent 8 }} + {{- end }} + {{- if $needsVolumePermissions }} + - name: volume-permissions + image: {{ include "redis.volumePermissions.image" . }} + imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} + command: + - /bin/bash + - -ec + - | + {{- if eq ( toString ( .Values.volumePermissions.containerSecurityContext.runAsUser )) "auto" }} + chown -R `id -u`:`id -G | cut -d " " -f2` {{ .Values.replica.persistence.path }} + {{- else }} + chown -R {{ .Values.replica.containerSecurityContext.runAsUser }}:{{ .Values.replica.podSecurityContext.fsGroup }} {{ .Values.replica.persistence.path }} + {{- end }} + {{- if eq ( toString ( .Values.volumePermissions.containerSecurityContext.runAsUser )) "auto" }} + securityContext: {{- omit .Values.volumePermissions.containerSecurityContext "runAsUser" | toYaml | nindent 12 }} + {{- else }} + securityContext: {{- .Values.volumePermissions.containerSecurityContext | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.volumePermissions.resources }} + resources: {{- toYaml .Values.volumePermissions.resources | nindent 12 }} + {{- end }} + volumeMounts: + - name: redis-data + mountPath: {{ .Values.replica.persistence.path }} + {{- if .Values.replica.persistence.subPath }} + subPath: {{ .Values.replica.persistence.subPath }} + {{- else if .Values.replica.persistence.subPathExpr }} + subPathExpr: {{ .Values.replica.persistence.subPathExpr }} + {{- end }} + {{- end }} + {{- if .Values.sysctl.enabled }} + - name: init-sysctl + image: {{ include "redis.sysctl.image" . 
}} + imagePullPolicy: {{ default "" .Values.sysctl.image.pullPolicy | quote }} + securityContext: + privileged: true + runAsUser: 0 + {{- if .Values.sysctl.command }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.sysctl.command "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.sysctl.resources }} + resources: {{- toYaml .Values.sysctl.resources | nindent 12 }} + {{- end }} + {{- if .Values.sysctl.mountHostSys }} + volumeMounts: + - name: host-sys + mountPath: /host-sys + {{- end }} + {{- end }} + {{- end }} + volumes: + - name: start-scripts + configMap: + name: {{ printf "%s-scripts" (include "common.names.fullname" .) }} + defaultMode: 0755 + - name: health + configMap: + name: {{ printf "%s-health" (include "common.names.fullname" .) }} + defaultMode: 0755 + {{- if .Values.auth.usePasswordFiles }} + - name: redis-password + secret: + secretName: {{ template "redis.secretName" . }} + items: + - key: {{ template "redis.secretPasswordKey" . }} + path: redis-password + {{- end }} + - name: config + configMap: + name: {{ include "redis.configmapName" . }} + {{- if .Values.sysctl.mountHostSys }} + - name: host-sys + hostPath: + path: /sys + {{- end }} + - name: redis-tmp-conf + {{- if .Values.replica.persistence.medium }} + emptyDir: + medium: {{ .Values.replica.persistence.medium | quote }} + {{- if .Values.replica.persistence.sizeLimit }} + sizeLimit: {{ .Values.replica.persistence.sizeLimit | quote }} + {{- end }} + {{- else }} + emptyDir: {} + {{- end }} + {{- if .Values.tls.enabled }} + - name: redis-certificates + secret: + secretName: {{ include "redis.tlsSecretName" . }} + defaultMode: 256 + {{- end }} + {{- if .Values.replica.extraVolumes }} + {{- include "common.tplvalues.render" ( dict "value" .Values.replica.extraVolumes "context" $ ) | nindent 8 }} + {{- end }} + {{- if .Values.metrics.extraVolumes }} + {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.extraVolumes "context" $ ) | nindent 8 }} + {{- end }} + {{- if not .Values.replica.persistence.enabled }} + - name: redis-data + {{- if .Values.replica.persistence.medium }} + emptyDir: { + medium: {{ .Values.replica.persistence.medium | quote }} + } + {{- else }} + emptyDir: {} + {{- end }} + {{- else if .Values.replica.persistence.existingClaim }} + - name: redis-data + persistentVolumeClaim: + claimName: {{ printf "%s" (tpl .Values.replica.persistence.existingClaim .) }} + {{- else }} + volumeClaimTemplates: + - metadata: + name: redis-data + labels: {{- include "common.labels.matchLabels" . | nindent 10 }} + app.kubernetes.io/component: replica + {{- if .Values.replica.persistence.annotations }} + annotations: {{- toYaml .Values.replica.persistence.annotations | nindent 10 }} + {{- end }} + spec: + accessModes: + {{- range .Values.replica.persistence.accessModes }} + - {{ . 
| quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.replica.persistence.size | quote }} + {{- if .Values.replica.persistence.selector }} + selector: {{- include "common.tplvalues.render" (dict "value" .Values.replica.persistence.selector "context" $) | nindent 10 }} + {{- end }} + {{- if .Values.replica.persistence.dataSource }} + dataSource: {{- include "common.tplvalues.render" (dict "value" .Values.replica.persistence.dataSource "context" $) | nindent 10 }} + {{- end }} + {{- include "common.storage.class" (dict "persistence" .Values.replica.persistence "global" .Values.global) | nindent 8 }} + {{- end }} +{{- end }} diff --git a/charts/modules/redis/templates/role.yaml b/charts/modules/redis/templates/role.yaml new file mode 100644 index 0000000..596466f --- /dev/null +++ b/charts/modules/redis/templates/role.yaml @@ -0,0 +1,28 @@ +{{- if .Values.rbac.create }} +apiVersion: {{ include "common.capabilities.rbac.apiVersion" . }} +kind: Role +metadata: + name: {{ template "common.names.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +rules: + {{- $pspAvailable := (semverCompare "<1.25-0" (include "common.capabilities.kubeVersion" .)) -}} + {{- if and $pspAvailable .Values.podSecurityPolicy.enabled }} + - apiGroups: + - '{{ template "podSecurityPolicy.apiGroup" . }}' + resources: + - 'podsecuritypolicies' + verbs: + - 'use' + resourceNames: [{{ printf "%s-master" (include "common.names.fullname" .) }}] + {{- end }} + {{- if .Values.rbac.rules }} + {{- include "common.tplvalues.render" ( dict "value" .Values.rbac.rules "context" $ ) | nindent 2 }} + {{- end }} +{{- end }} diff --git a/charts/modules/redis/templates/rolebinding.yaml b/charts/modules/redis/templates/rolebinding.yaml new file mode 100644 index 0000000..74968b8 --- /dev/null +++ b/charts/modules/redis/templates/rolebinding.yaml @@ -0,0 +1,21 @@ +{{- if .Values.rbac.create }} +apiVersion: {{ include "common.capabilities.rbac.apiVersion" . }} +kind: RoleBinding +metadata: + name: {{ template "common.names.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ template "common.names.fullname" . }} +subjects: + - kind: ServiceAccount + name: {{ template "redis.serviceAccountName" . }} +{{- end }} diff --git a/charts/modules/redis/templates/scripts-configmap.yaml b/charts/modules/redis/templates/scripts-configmap.yaml new file mode 100644 index 0000000..36feb29 --- /dev/null +++ b/charts/modules/redis/templates/scripts-configmap.yaml @@ -0,0 +1,653 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ printf "%s-scripts" (include "common.names.fullname" .) }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: +{{- if and (eq .Values.architecture "replication") .Values.sentinel.enabled }} + start-node.sh: | + #!/bin/bash + + . /opt/bitnami/scripts/libos.sh + . /opt/bitnami/scripts/liblog.sh + . /opt/bitnami/scripts/libvalidations.sh + + get_port() { + hostname="$1" + type="$2" + + port_var=$(echo "${hostname^^}_SERVICE_PORT_$type" | sed "s/-/_/g") + port=${!port_var} + + if [ -z "$port" ]; then + case $type in + "SENTINEL") + echo {{ .Values.sentinel.containerPorts.sentinel }} + ;; + "REDIS") + echo {{ .Values.master.containerPorts.redis }} + ;; + esac + else + echo $port + fi + } + + get_full_hostname() { + hostname="$1" + + {{- if .Values.useExternalDNS.enabled }} + echo "${hostname}.{{- include "redis.externalDNS.suffix" . }}" + {{- else if eq .Values.sentinel.service.type "NodePort" }} + echo "${hostname}.{{- .Release.Namespace }}" + {{- else }} + echo "${hostname}.${HEADLESS_SERVICE}" + {{- end }} + } + + REDISPORT=$(get_port "$HOSTNAME" "REDIS") + + HEADLESS_SERVICE="{{ template "common.names.fullname" . }}-headless.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}" + + if [ -n "$REDIS_EXTERNAL_MASTER_HOST" ]; then + REDIS_SERVICE="$REDIS_EXTERNAL_MASTER_HOST" + else + REDIS_SERVICE="{{ template "common.names.fullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}" + fi + + SENTINEL_SERVICE_PORT=$(get_port "{{ include "common.names.fullname" . }}" "TCP_SENTINEL") + validate_quorum() { + if is_boolean_yes "$REDIS_TLS_ENABLED"; then + quorum_info_command="{{- if and .Values.auth.enabled .Values.auth.sentinel }}REDISCLI_AUTH="\$REDIS_PASSWORD" {{ end }}redis-cli -h $REDIS_SERVICE -p $SENTINEL_SERVICE_PORT --tls --cert ${REDIS_TLS_CERT_FILE} --key ${REDIS_TLS_KEY_FILE} --cacert ${REDIS_TLS_CA_FILE} sentinel master {{ .Values.sentinel.masterSet }}" + else + quorum_info_command="{{- if and .Values.auth.enabled .Values.auth.sentinel }}REDISCLI_AUTH="\$REDIS_PASSWORD" {{ end }}redis-cli -h $REDIS_SERVICE -p $SENTINEL_SERVICE_PORT sentinel master {{ .Values.sentinel.masterSet }}" + fi + info "about to run the command: $quorum_info_command" + eval $quorum_info_command | grep -Fq "s_down" + } + + trigger_manual_failover() { + if is_boolean_yes "$REDIS_TLS_ENABLED"; then + failover_command="{{- if and .Values.auth.enabled .Values.auth.sentinel }}REDISCLI_AUTH="\$REDIS_PASSWORD" {{ end }}redis-cli -h $REDIS_SERVICE -p $SENTINEL_SERVICE_PORT --tls --cert ${REDIS_TLS_CERT_FILE} --key ${REDIS_TLS_KEY_FILE} --cacert ${REDIS_TLS_CA_FILE} sentinel failover {{ .Values.sentinel.masterSet }}" + else + failover_command="{{- if and .Values.auth.enabled .Values.auth.sentinel }}REDISCLI_AUTH="\$REDIS_PASSWORD" {{ end }}redis-cli -h $REDIS_SERVICE -p $SENTINEL_SERVICE_PORT sentinel failover {{ .Values.sentinel.masterSet }}" + fi + + info "about to run the command: $failover_command" + eval $failover_command + } + + get_sentinel_master_info() { + if is_boolean_yes "$REDIS_TLS_ENABLED"; then + sentinel_info_command="{{- if and .Values.auth.enabled .Values.auth.sentinel }}REDISCLI_AUTH="\$REDIS_PASSWORD" {{ end }}timeout {{ .Values.sentinel.getMasterTimeout }} redis-cli -h $REDIS_SERVICE -p $SENTINEL_SERVICE_PORT --tls --cert ${REDIS_TLS_CERT_FILE} --key 
${REDIS_TLS_KEY_FILE} --cacert ${REDIS_TLS_CA_FILE} sentinel get-master-addr-by-name {{ .Values.sentinel.masterSet }}" + else + sentinel_info_command="{{- if and .Values.auth.enabled .Values.auth.sentinel }}REDISCLI_AUTH="\$REDIS_PASSWORD" {{ end }}timeout {{ .Values.sentinel.getMasterTimeout }} redis-cli -h $REDIS_SERVICE -p $SENTINEL_SERVICE_PORT sentinel get-master-addr-by-name {{ .Values.sentinel.masterSet }}" + fi + + info "about to run the command: $sentinel_info_command" + eval $sentinel_info_command + } + + {{- if and .Values.replica.containerSecurityContext.runAsUser (eq (.Values.replica.containerSecurityContext.runAsUser | int) 0) }} + useradd redis + chown -R redis {{ .Values.replica.persistence.path }} + {{- end }} + + [[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")" + [[ -f $REDIS_MASTER_PASSWORD_FILE ]] && export REDIS_MASTER_PASSWORD="$(< "${REDIS_MASTER_PASSWORD_FILE}")" + + # check if there is a master + master_in_persisted_conf="$(get_full_hostname "$HOSTNAME")" + master_port_in_persisted_conf="$REDIS_MASTER_PORT_NUMBER" + master_in_sentinel="$(get_sentinel_master_info)" + redisRetVal=$? + + {{- if .Values.sentinel.persistence.enabled }} + if [[ -f /opt/bitnami/redis-sentinel/etc/sentinel.conf ]]; then + master_in_persisted_conf="$(awk '/monitor/ {print $4}' /opt/bitnami/redis-sentinel/etc/sentinel.conf)" + master_port_in_persisted_conf="$(awk '/monitor/ {print $5}' /opt/bitnami/redis-sentinel/etc/sentinel.conf)" + info "Found previous master ${master_in_persisted_conf}:${master_port_in_persisted_conf} in /opt/bitnami/redis-sentinel/etc/sentinel.conf" + debug "$(cat /opt/bitnami/redis-sentinel/etc/sentinel.conf | grep monitor)" + touch /opt/bitnami/redis-sentinel/etc/.node_read + fi + {{- end }} + + if [[ $redisRetVal -ne 0 ]]; then + if [[ "$master_in_persisted_conf" == "$(get_full_hostname "$HOSTNAME")" ]]; then + # Case 1: No active sentinel and in previous sentinel.conf we were the master --> MASTER + info "Configuring the node as master" + export REDIS_REPLICATION_MODE="master" + else + # Case 2: No active sentinel and in previous sentinel.conf we were not master --> REPLICA + info "Configuring the node as replica" + export REDIS_REPLICATION_MODE="replica" + REDIS_MASTER_HOST=${master_in_persisted_conf} + REDIS_MASTER_PORT_NUMBER=${master_port_in_persisted_conf} + fi + else + # Fetches current master's host and port + REDIS_SENTINEL_INFO=($(get_sentinel_master_info)) + info "Current master: REDIS_SENTINEL_INFO=(${REDIS_SENTINEL_INFO[0]},${REDIS_SENTINEL_INFO[1]})" + REDIS_MASTER_HOST=${REDIS_SENTINEL_INFO[0]} + REDIS_MASTER_PORT_NUMBER=${REDIS_SENTINEL_INFO[1]} + + if [[ "$REDIS_MASTER_HOST" == "$(get_full_hostname "$HOSTNAME")" ]]; then + # Case 3: Active sentinel and master it is this node --> MASTER + info "Configuring the node as master" + export REDIS_REPLICATION_MODE="master" + else + # Case 4: Active sentinel and master is not this node --> REPLICA + info "Configuring the node as replica" + export REDIS_REPLICATION_MODE="replica" + + {{- if and .Values.sentinel.automateClusterRecovery (le (int .Values.sentinel.downAfterMilliseconds) 2000) }} + retry_count=1 + while validate_quorum + do + info "sleeping, waiting for Redis master to come up" + sleep 1s + if ! 
((retry_count % 11)); then + info "Trying to manually failover" + failover_result=$(trigger_manual_failover) + + debug "Failover result: $failover_result" + fi + + ((retry_count+=1)) + done + info "Redis master is up now" + {{- end }} + fi + fi + + if [[ -n "$REDIS_EXTERNAL_MASTER_HOST" ]]; then + REDIS_MASTER_HOST="$REDIS_EXTERNAL_MASTER_HOST" + REDIS_MASTER_PORT_NUMBER="${REDIS_EXTERNAL_MASTER_PORT}" + fi + + if [[ -f /opt/bitnami/redis/mounted-etc/replica.conf ]];then + cp /opt/bitnami/redis/mounted-etc/replica.conf /opt/bitnami/redis/etc/replica.conf + fi + + if [[ -f /opt/bitnami/redis/mounted-etc/redis.conf ]];then + cp /opt/bitnami/redis/mounted-etc/redis.conf /opt/bitnami/redis/etc/redis.conf + fi + + echo "" >> /opt/bitnami/redis/etc/replica.conf + echo "replica-announce-port $REDISPORT" >> /opt/bitnami/redis/etc/replica.conf + echo "replica-announce-ip $(get_full_hostname "$HOSTNAME")" >> /opt/bitnami/redis/etc/replica.conf + + {{- if .Values.tls.enabled }} + ARGS=("--port" "0") + ARGS+=("--tls-port" "${REDIS_TLS_PORT}") + ARGS+=("--tls-cert-file" "${REDIS_TLS_CERT_FILE}") + ARGS+=("--tls-key-file" "${REDIS_TLS_KEY_FILE}") + ARGS+=("--tls-ca-cert-file" "${REDIS_TLS_CA_FILE}") + ARGS+=("--tls-auth-clients" "${REDIS_TLS_AUTH_CLIENTS}") + ARGS+=("--tls-replication" "yes") + {{- if .Values.tls.dhParamsFilename }} + ARGS+=("--tls-dh-params-file" "${REDIS_TLS_DH_PARAMS_FILE}") + {{- end }} + {{- else }} + ARGS=("--port" "${REDIS_PORT}") + {{- end }} + + if [[ "$REDIS_REPLICATION_MODE" = "slave" ]] || [[ "$REDIS_REPLICATION_MODE" = "replica" ]]; then + ARGS+=("--replicaof" "${REDIS_MASTER_HOST}" "${REDIS_MASTER_PORT_NUMBER}") + fi + + {{- if .Values.auth.enabled }} + ARGS+=("--requirepass" "${REDIS_PASSWORD}") + ARGS+=("--masterauth" "${REDIS_MASTER_PASSWORD}") + {{- else }} + ARGS+=("--protected-mode" "no") + {{- end }} + ARGS+=("--include" "/opt/bitnami/redis/etc/replica.conf") + ARGS+=("--include" "/opt/bitnami/redis/etc/redis.conf") + {{- if .Values.replica.extraFlags }} + {{- range .Values.replica.extraFlags }} + ARGS+=({{ . | quote }}) + {{- end }} + {{- end }} + + {{- if .Values.replica.preExecCmds }} + {{- .Values.replica.preExecCmds | nindent 4 }} + {{- end }} + + {{- if .Values.replica.command }} + exec {{ .Values.replica.command }} "${ARGS[@]}" + {{- else }} + exec redis-server "${ARGS[@]}" + {{- end }} + + start-sentinel.sh: | + #!/bin/bash + + . /opt/bitnami/scripts/libos.sh + . /opt/bitnami/scripts/libvalidations.sh + . /opt/bitnami/scripts/libfile.sh + + HEADLESS_SERVICE="{{ template "common.names.fullname" . }}-headless.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}" + REDIS_SERVICE="{{ template "common.names.fullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}" + + get_port() { + hostname="$1" + type="$2" + + port_var=$(echo "${hostname^^}_SERVICE_PORT_$type" | sed "s/-/_/g") + port=${!port_var} + + if [ -z "$port" ]; then + case $type in + "SENTINEL") + echo {{ .Values.sentinel.containerPorts.sentinel }} + ;; + "REDIS") + echo {{ .Values.master.containerPorts.redis }} + ;; + esac + else + echo $port + fi + } + + get_full_hostname() { + hostname="$1" + + {{- if .Values.useExternalDNS.enabled }} + echo "${hostname}.{{- include "redis.externalDNS.suffix" . 
}}" + {{- else if eq .Values.sentinel.service.type "NodePort" }} + echo "${hostname}.{{- .Release.Namespace }}" + {{- else }} + echo "${hostname}.${HEADLESS_SERVICE}" + {{- end }} + } + + SERVPORT=$(get_port "$HOSTNAME" "SENTINEL") + REDISPORT=$(get_port "$HOSTNAME" "REDIS") + SENTINEL_SERVICE_PORT=$(get_port "{{ include "common.names.fullname" . }}" "TCP_SENTINEL") + + sentinel_conf_set() { + local -r key="${1:?missing key}" + local value="${2:-}" + + # Sanitize inputs + value="${value//\\/\\\\}" + value="${value//&/\\&}" + value="${value//\?/\\?}" + [[ "$value" = "" ]] && value="\"$value\"" + + replace_in_file "/opt/bitnami/redis-sentinel/etc/sentinel.conf" "^#*\s*${key} .*" "${key} ${value}" false + } + sentinel_conf_add() { + echo $'\n'"$@" >> "/opt/bitnami/redis-sentinel/etc/sentinel.conf" + } + host_id() { + echo "$1" | openssl sha1 | awk '{print $2}' + } + get_sentinel_master_info() { + if is_boolean_yes "$REDIS_SENTINEL_TLS_ENABLED"; then + sentinel_info_command="{{- if and .Values.auth.enabled .Values.auth.sentinel }}REDISCLI_AUTH="\$REDIS_PASSWORD" {{ end }}timeout {{ .Values.sentinel.getMasterTimeout }} redis-cli -h $REDIS_SERVICE -p $SENTINEL_SERVICE_PORT --tls --cert ${REDIS_SENTINEL_TLS_CERT_FILE} --key ${REDIS_SENTINEL_TLS_KEY_FILE} --cacert ${REDIS_SENTINEL_TLS_CA_FILE} sentinel get-master-addr-by-name {{ .Values.sentinel.masterSet }}" + else + sentinel_info_command="{{- if and .Values.auth.enabled .Values.auth.sentinel }}REDISCLI_AUTH="\$REDIS_PASSWORD" {{ end }}timeout {{ .Values.sentinel.getMasterTimeout }} redis-cli -h $REDIS_SERVICE -p $SENTINEL_SERVICE_PORT sentinel get-master-addr-by-name {{ .Values.sentinel.masterSet }}" + fi + info "about to run the command: $sentinel_info_command" + eval $sentinel_info_command + } + + [[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")" + + master_in_persisted_conf="$(get_full_hostname "$HOSTNAME")" + + {{- if .Values.sentinel.persistence.enabled }} + if [[ -f /opt/bitnami/redis-sentinel/etc/sentinel.conf ]]; then + check_lock_file() { + [[ -f /opt/bitnami/redis-sentinel/etc/.node_read ]] + } + retry_while "check_lock_file" + rm -f /opt/bitnami/redis-sentinel/etc/.node_read + master_in_persisted_conf="$(awk '/monitor/ {print $4}' /opt/bitnami/redis-sentinel/etc/sentinel.conf)" + info "Found previous master $master_in_persisted_conf in /opt/bitnami/redis-sentinel/etc/sentinel.conf" + debug "$(cat /opt/bitnami/redis-sentinel/etc/sentinel.conf | grep monitor)" + fi + {{- end }} + if ! 
get_sentinel_master_info && [[ "$master_in_persisted_conf" == "$(get_full_hostname "$HOSTNAME")" ]]; then + # No master found, lets create a master node + export REDIS_REPLICATION_MODE="master" + + REDIS_MASTER_HOST=$(get_full_hostname "$HOSTNAME") + REDIS_MASTER_PORT_NUMBER="$REDISPORT" + else + export REDIS_REPLICATION_MODE="replica" + + # Fetches current master's host and port + REDIS_SENTINEL_INFO=($(get_sentinel_master_info)) + info "printing REDIS_SENTINEL_INFO=(${REDIS_SENTINEL_INFO[0]},${REDIS_SENTINEL_INFO[1]})" + REDIS_MASTER_HOST=${REDIS_SENTINEL_INFO[0]} + REDIS_MASTER_PORT_NUMBER=${REDIS_SENTINEL_INFO[1]} + fi + + if [[ -n "$REDIS_EXTERNAL_MASTER_HOST" ]]; then + REDIS_MASTER_HOST="$REDIS_EXTERNAL_MASTER_HOST" + REDIS_MASTER_PORT_NUMBER="${REDIS_EXTERNAL_MASTER_PORT}" + fi + + cp /opt/bitnami/redis-sentinel/mounted-etc/sentinel.conf /opt/bitnami/redis-sentinel/etc/sentinel.conf + {{- if .Values.auth.enabled }} + printf "\nsentinel auth-pass %s %s" "{{ .Values.sentinel.masterSet }}" "$REDIS_PASSWORD" >> /opt/bitnami/redis-sentinel/etc/sentinel.conf + {{- if and .Values.auth.enabled .Values.auth.sentinel }} + printf "\nrequirepass %s" "$REDIS_PASSWORD" >> /opt/bitnami/redis-sentinel/etc/sentinel.conf + {{- end }} + {{- end }} + printf "\nsentinel myid %s" "$(host_id "$HOSTNAME")" >> /opt/bitnami/redis-sentinel/etc/sentinel.conf + + if [[ -z "$REDIS_MASTER_HOST" ]] || [[ -z "$REDIS_MASTER_PORT_NUMBER" ]] + then + # Prevent incorrect configuration to be written to sentinel.conf + error "Redis master host is configured incorrectly (host: $REDIS_MASTER_HOST, port: $REDIS_MASTER_PORT_NUMBER)" + exit 1 + fi + + sentinel_conf_set "sentinel monitor" "{{ .Values.sentinel.masterSet }} "$REDIS_MASTER_HOST" "$REDIS_MASTER_PORT_NUMBER" {{ .Values.sentinel.quorum }}" + + add_known_sentinel() { + hostname="$1" + ip="$2" + + if [[ -n "$hostname" && -n "$ip" && "$hostname" != "$HOSTNAME" ]]; then + sentinel_conf_add "sentinel known-sentinel {{ .Values.sentinel.masterSet }} $(get_full_hostname "$hostname") $(get_port "$hostname" "SENTINEL") $(host_id "$hostname")" + fi + } + add_known_replica() { + hostname="$1" + ip="$2" + + if [[ -n "$ip" && "$(get_full_hostname "$hostname")" != "$REDIS_MASTER_HOST" ]]; then + sentinel_conf_add "sentinel known-replica {{ .Values.sentinel.masterSet }} $(get_full_hostname "$hostname") $(get_port "$hostname" "REDIS")" + fi + } + + # Add available hosts on the network as known replicas & sentinels + for node in $(seq 0 $(({{ .Values.replica.replicaCount }}-1))); do + hostname="{{ template "common.names.fullname" . 
}}-node-$node" + ip="$(getent hosts "$hostname.$HEADLESS_SERVICE" | awk '{ print $1 }')" + add_known_sentinel "$hostname" "$ip" + add_known_replica "$hostname" "$ip" + done + + echo "" >> /opt/bitnami/redis-sentinel/etc/sentinel.conf + {{- if not (contains "sentinel announce-hostnames" .Values.sentinel.configuration) }} + echo "sentinel announce-hostnames yes" >> /opt/bitnami/redis-sentinel/etc/sentinel.conf + {{- end }} + {{- if not (contains "sentinel resolve-hostnames" .Values.sentinel.configuration) }} + echo "sentinel resolve-hostnames yes" >> /opt/bitnami/redis-sentinel/etc/sentinel.conf + {{- end }} + {{- if not (contains "sentinel announce-port" .Values.sentinel.configuration) }} + echo "sentinel announce-port $SERVPORT" >> /opt/bitnami/redis-sentinel/etc/sentinel.conf + {{- end }} + {{- if not (contains "sentinel announce-ip" .Values.sentinel.configuration) }} + echo "sentinel announce-ip $(get_full_hostname "$HOSTNAME")" >> /opt/bitnami/redis-sentinel/etc/sentinel.conf + {{- end }} + + {{- if .Values.tls.enabled }} + ARGS=("--port" "0") + ARGS+=("--tls-port" "${REDIS_SENTINEL_TLS_PORT_NUMBER}") + ARGS+=("--tls-cert-file" "${REDIS_SENTINEL_TLS_CERT_FILE}") + ARGS+=("--tls-key-file" "${REDIS_SENTINEL_TLS_KEY_FILE}") + ARGS+=("--tls-ca-cert-file" "${REDIS_SENTINEL_TLS_CA_FILE}") + ARGS+=("--tls-replication" "yes") + ARGS+=("--tls-auth-clients" "${REDIS_SENTINEL_TLS_AUTH_CLIENTS}") + {{- if .Values.tls.dhParamsFilename }} + ARGS+=("--tls-dh-params-file" "${REDIS_SENTINEL_TLS_DH_PARAMS_FILE}") + {{- end }} + {{- end }} + {{- if .Values.sentinel.preExecCmds }} + {{ .Values.sentinel.preExecCmds | nindent 4 }} + {{- end }} + exec redis-server /opt/bitnami/redis-sentinel/etc/sentinel.conf {{- if .Values.tls.enabled }} "${ARGS[@]}" {{- end }} --sentinel + prestop-sentinel.sh: | + #!/bin/bash + + . /opt/bitnami/scripts/libvalidations.sh + . /opt/bitnami/scripts/libos.sh + + HEADLESS_SERVICE="{{ template "common.names.fullname" . }}-headless.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}" + SENTINEL_SERVICE_ENV_NAME={{ printf "%s%s" (upper (include "common.names.fullname" .)| replace "-" "_") "_SERVICE_PORT_TCP_SENTINEL" }} + SENTINEL_SERVICE_PORT=${!SENTINEL_SERVICE_ENV_NAME} + + get_full_hostname() { + hostname="$1" + + {{- if .Values.useExternalDNS.enabled }} + echo "${hostname}.{{- include "redis.externalDNS.suffix" . }}" + {{- else if eq .Values.sentinel.service.type "NodePort" }} + echo "${hostname}.{{- .Release.Namespace }}" + {{- else }} + echo "${hostname}.${HEADLESS_SERVICE}" + {{- end }} + } + run_sentinel_command() { + if is_boolean_yes "$REDIS_SENTINEL_TLS_ENABLED"; then + redis-cli -h "$REDIS_SERVICE" -p "$SENTINEL_SERVICE_PORT" --tls --cert "$REDIS_SENTINEL_TLS_CERT_FILE" --key "$REDIS_SENTINEL_TLS_KEY_FILE" --cacert "$REDIS_SENTINEL_TLS_CA_FILE" sentinel "$@" + else + redis-cli -h "$REDIS_SERVICE" -p "$SENTINEL_SERVICE_PORT" sentinel "$@" + fi + } + failover_finished() { + REDIS_SENTINEL_INFO=($(run_sentinel_command get-master-addr-by-name "{{ .Values.sentinel.masterSet }}")) + REDIS_MASTER_HOST="${REDIS_SENTINEL_INFO[0]}" + [[ "$REDIS_MASTER_HOST" != "$(get_full_hostname $HOSTNAME)" ]] + } + + REDIS_SERVICE="{{ include "common.names.fullname" . 
}}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}" + + {{ if .Values.auth.sentinel -}} + # redis-cli automatically consumes credentials from the REDISCLI_AUTH variable + [[ -n "$REDIS_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_PASSWORD" + [[ -f "$REDIS_PASSWORD_FILE" ]] && export REDISCLI_AUTH="$(< "${REDIS_PASSWORD_FILE}")" + {{- end }} + + if ! failover_finished; then + echo "I am the master pod and you are stopping me. Starting sentinel failover" + # if I am the master, issue a command to failover once and then wait for the failover to finish + run_sentinel_command failover "{{ .Values.sentinel.masterSet }}" + if retry_while "failover_finished" "{{ sub .Values.sentinel.terminationGracePeriodSeconds 10 }}" 1; then + echo "Master has been successfully failed over to a different pod." + exit 0 + else + echo "Master failover failed" + exit 1 + fi + else + exit 0 + fi + prestop-redis.sh: | + #!/bin/bash + + . /opt/bitnami/scripts/libvalidations.sh + . /opt/bitnami/scripts/libos.sh + + run_redis_command() { + if is_boolean_yes "$REDIS_TLS_ENABLED"; then + redis-cli -h 127.0.0.1 -p "$REDIS_TLS_PORT" --tls --cert "$REDIS_TLS_CERT_FILE" --key "$REDIS_TLS_KEY_FILE" --cacert "$REDIS_TLS_CA_FILE" "$@" + else + redis-cli -h 127.0.0.1 -p "$REDIS_PORT" "$@" + fi + } + failover_finished() { + REDIS_ROLE=$(run_redis_command role | head -1) + [[ "$REDIS_ROLE" != "master" ]] + } + + # redis-cli automatically consumes credentials from the REDISCLI_AUTH variable + [[ -n "$REDIS_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_PASSWORD" + [[ -f "$REDIS_PASSWORD_FILE" ]] && export REDISCLI_AUTH="$(< "${REDIS_PASSWORD_FILE}")" + + if ! failover_finished; then + echo "I am the master pod and you are stopping me." + # Running failover command + echo "Running the FAILOVER command" + run_redis_command FAILOVER TIMEOUT "{{ mul (sub .Values.sentinel.terminationGracePeriodSeconds 10) 1000 }}" + # Waiting for the failover to finish + echo "Waiting for the failover to finish up to {{ sub .Values.sentinel.terminationGracePeriodSeconds 10 }}s" + if retry_while "failover_finished" "{{ sub .Values.sentinel.terminationGracePeriodSeconds 10 }}" 1; then + echo "Master has been successfully failed over to a different pod." 
+ exit 0 + else + echo "Master failover failed" + exit 1 + fi + else + exit 0 + fi + +{{- else }} + start-master.sh: | + #!/bin/bash + + [[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")" + {{- if and .Values.master.containerSecurityContext.runAsUser (eq (.Values.master.containerSecurityContext.runAsUser | int) 0) }} + useradd redis + chown -R redis {{ .Values.master.persistence.path }} + {{- end }} + if [[ -f /opt/bitnami/redis/mounted-etc/master.conf ]];then + cp /opt/bitnami/redis/mounted-etc/master.conf /opt/bitnami/redis/etc/master.conf + fi + if [[ -f /opt/bitnami/redis/mounted-etc/redis.conf ]];then + cp /opt/bitnami/redis/mounted-etc/redis.conf /opt/bitnami/redis/etc/redis.conf + fi + {{- if .Values.tls.enabled }} + ARGS=("--port" "0") + ARGS+=("--tls-port" "${REDIS_TLS_PORT}") + ARGS+=("--tls-cert-file" "${REDIS_TLS_CERT_FILE}") + ARGS+=("--tls-key-file" "${REDIS_TLS_KEY_FILE}") + ARGS+=("--tls-ca-cert-file" "${REDIS_TLS_CA_FILE}") + ARGS+=("--tls-auth-clients" "${REDIS_TLS_AUTH_CLIENTS}") + {{- if .Values.tls.dhParamsFilename }} + ARGS+=("--tls-dh-params-file" "${REDIS_TLS_DH_PARAMS_FILE}") + {{- end }} + {{- else }} + ARGS=("--port" "${REDIS_PORT}") + {{- end }} + {{- if .Values.auth.enabled }} + ARGS+=("--requirepass" "${REDIS_PASSWORD}") + ARGS+=("--masterauth" "${REDIS_PASSWORD}") + {{- else }} + ARGS+=("--protected-mode" "no") + {{- end }} + ARGS+=("--include" "/opt/bitnami/redis/etc/redis.conf") + ARGS+=("--include" "/opt/bitnami/redis/etc/master.conf") + {{- if .Values.master.extraFlags }} + {{- range .Values.master.extraFlags }} + ARGS+=({{ . | quote }}) + {{- end }} + {{- end }} + {{- if .Values.master.preExecCmds }} + {{ .Values.master.preExecCmds | nindent 4 }} + {{- end }} + {{- if .Values.master.command }} + exec {{ .Values.master.command }} "${ARGS[@]}" + {{- else }} + exec redis-server "${ARGS[@]}" + {{- end }} + {{- if eq .Values.architecture "replication" }} + start-replica.sh: | + #!/bin/bash + + get_port() { + hostname="$1" + type="$2" + + port_var=$(echo "${hostname^^}_SERVICE_PORT_$type" | sed "s/-/_/g") + port=${!port_var} + + if [ -z "$port" ]; then + case $type in + "SENTINEL") + echo {{ .Values.sentinel.containerPorts.sentinel }} + ;; + "REDIS") + echo {{ .Values.master.containerPorts.redis }} + ;; + esac + else + echo $port + fi + } + + get_full_hostname() { + hostname="$1" + + {{- if .Values.useExternalDNS.enabled }} + echo "${hostname}.{{- include "redis.externalDNS.suffix" . 
}}" + {{- else if eq .Values.sentinel.service.type "NodePort" }} + echo "${hostname}.{{- .Release.Namespace }}" + {{- else }} + echo "${hostname}.${HEADLESS_SERVICE}" + {{- end }} + } + + REDISPORT=$(get_port "$HOSTNAME" "REDIS") + + [[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")" + [[ -f $REDIS_MASTER_PASSWORD_FILE ]] && export REDIS_MASTER_PASSWORD="$(< "${REDIS_MASTER_PASSWORD_FILE}")" + {{- if and .Values.replica.containerSecurityContext.runAsUser (eq (.Values.replica.containerSecurityContext.runAsUser | int) 0) }} + useradd redis + chown -R redis {{ .Values.replica.persistence.path }} + {{- end }} + if [[ -f /opt/bitnami/redis/mounted-etc/replica.conf ]];then + cp /opt/bitnami/redis/mounted-etc/replica.conf /opt/bitnami/redis/etc/replica.conf + fi + if [[ -f /opt/bitnami/redis/mounted-etc/redis.conf ]];then + cp /opt/bitnami/redis/mounted-etc/redis.conf /opt/bitnami/redis/etc/redis.conf + fi + + echo "" >> /opt/bitnami/redis/etc/replica.conf + echo "replica-announce-port $REDISPORT" >> /opt/bitnami/redis/etc/replica.conf + echo "replica-announce-ip $(get_full_hostname "$HOSTNAME")" >> /opt/bitnami/redis/etc/replica.conf + + {{- if .Values.tls.enabled }} + ARGS=("--port" "0") + ARGS+=("--tls-port" "${REDIS_TLS_PORT}") + ARGS+=("--tls-cert-file" "${REDIS_TLS_CERT_FILE}") + ARGS+=("--tls-key-file" "${REDIS_TLS_KEY_FILE}") + ARGS+=("--tls-ca-cert-file" "${REDIS_TLS_CA_FILE}") + ARGS+=("--tls-auth-clients" "${REDIS_TLS_AUTH_CLIENTS}") + ARGS+=("--tls-replication" "yes") + {{- if .Values.tls.dhParamsFilename }} + ARGS+=("--tls-dh-params-file" "${REDIS_TLS_DH_PARAMS_FILE}") + {{- end }} + {{- else }} + ARGS=("--port" "${REDIS_PORT}") + {{- end }} + ARGS+=("--replicaof" "${REDIS_MASTER_HOST}" "${REDIS_MASTER_PORT_NUMBER}") + {{- if .Values.auth.enabled }} + ARGS+=("--requirepass" "${REDIS_PASSWORD}") + ARGS+=("--masterauth" "${REDIS_MASTER_PASSWORD}") + {{- else }} + ARGS+=("--protected-mode" "no") + {{- end }} + ARGS+=("--include" "/opt/bitnami/redis/etc/redis.conf") + ARGS+=("--include" "/opt/bitnami/redis/etc/replica.conf") + {{- if .Values.replica.extraFlags }} + {{- range .Values.replica.extraFlags }} + ARGS+=({{ . | quote }}) + {{- end }} + {{- end }} + {{- if .Values.replica.preExecCmds }} + {{ .Values.replica.preExecCmds | nindent 4 }} + {{- end }} + {{- if .Values.replica.command }} + exec {{ .Values.replica.command }} "${ARGS[@]}" + {{- else }} + exec redis-server "${ARGS[@]}" + {{- end }} + {{- end }} +{{- end }} diff --git a/charts/modules/redis/templates/secret.yaml b/charts/modules/redis/templates/secret.yaml new file mode 100644 index 0000000..2edc0d8 --- /dev/null +++ b/charts/modules/redis/templates/secret.yaml @@ -0,0 +1,23 @@ +{{- if and .Values.auth.enabled (not .Values.auth.existingSecret) -}} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "common.names.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if or .Values.secretAnnotations .Values.commonAnnotations }} + annotations: + {{- if .Values.secretAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.secretAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} +type: Opaque +data: + redis-password: {{ include "redis.password" . | b64enc | quote }} +{{- end -}} diff --git a/charts/modules/redis/templates/sentinel/hpa.yaml b/charts/modules/redis/templates/sentinel/hpa.yaml new file mode 100644 index 0000000..e1b765e --- /dev/null +++ b/charts/modules/redis/templates/sentinel/hpa.yaml @@ -0,0 +1,47 @@ +{{- if and .Values.replica.autoscaling.enabled .Values.sentinel.enabled }} +apiVersion: {{ include "common.capabilities.hpa.apiVersion" ( dict "context" $ ) }} +kind: HorizontalPodAutoscaler +metadata: + name: {{ printf "%s-node" (include "common.names.fullname" .) }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: replica + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" (dict "value" .Values.commonLabels "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + scaleTargetRef: + apiVersion: {{ include "common.capabilities.deployment.apiVersion" . }} + kind: StatefulSet + name: {{ printf "%s-node" (include "common.names.fullname" .) }} + minReplicas: {{ .Values.replica.autoscaling.minReplicas }} + maxReplicas: {{ .Values.replica.autoscaling.maxReplicas }} + metrics: + {{- if .Values.replica.autoscaling.targetMemory }} + - type: Resource + resource: + name: memory + {{- if semverCompare "<1.23-0" (include "common.capabilities.kubeVersion" .) }} + targetAverageUtilization: {{ .Values.replica.autoscaling.targetMemory }} + {{- else }} + target: + type: Utilization + averageUtilization: {{ .Values.replica.autoscaling.targetMemory }} + {{- end }} + {{- end }} + {{- if .Values.replica.autoscaling.targetCPU }} + - type: Resource + resource: + name: cpu + {{- if semverCompare "<1.23-0" (include "common.capabilities.kubeVersion" .) 
}} + targetAverageUtilization: {{ .Values.replica.autoscaling.targetCPU }} + {{- else }} + target: + type: Utilization + averageUtilization: {{ .Values.replica.autoscaling.targetCPU }} + {{- end }} + {{- end }} +{{- end }} diff --git a/charts/modules/redis/templates/sentinel/node-services.yaml b/charts/modules/redis/templates/sentinel/node-services.yaml new file mode 100644 index 0000000..d3e635e --- /dev/null +++ b/charts/modules/redis/templates/sentinel/node-services.yaml @@ -0,0 +1,70 @@ +{{- if and (eq .Values.architecture "replication") .Values.sentinel.enabled (eq .Values.sentinel.service.type "NodePort") (or .Release.IsUpgrade .Values.sentinel.service.nodePorts.redis ) }} + +{{- range $i := until (int .Values.replica.replicaCount) }} + +{{ $portsmap := (lookup "v1" "ConfigMap" $.Release.Namespace (printf "%s-%s" ( include "common.names.fullname" $ ) "ports-configmap")).data }} + +{{ $sentinelport := 0}} +{{ $redisport := 0}} +{{- if $portsmap }} +{{ $sentinelport = index $portsmap (printf "%s-node-%s-%s" (include "common.names.fullname" $) (toString $i) "sentinel") }} +{{ $redisport = index $portsmap (printf "%s-node-%s-%s" (include "common.names.fullname" $) (toString $i) "redis") }} +{{- else }} +{{- end }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "common.names.fullname" $ }}-node-{{ $i }} + namespace: {{ $.Release.Namespace | quote }} + labels: {{- include "common.labels.standard" $ | nindent 4 }} + app.kubernetes.io/component: node + {{- if $.Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" $.Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if or $.Values.sentinel.service.annotations $.Values.commonAnnotations }} + annotations: + {{- if $.Values.sentinel.service.annotations }} + {{- include "common.tplvalues.render" ( dict "value" $.Values.sentinel.service.annotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if $.Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" $.Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} +spec: + type: NodePort + ports: + - name: sentinel + {{- if $.Values.sentinel.service.nodePorts.sentinel }} + nodePort: {{ (add $.Values.sentinel.service.nodePorts.sentinel $i 1) }} + port: {{ (add $.Values.sentinel.service.nodePorts.sentinel $i 1) }} + {{- else }} + nodePort: {{ $sentinelport }} + port: {{ $sentinelport }} + {{- end }} + protocol: TCP + targetPort: {{ $.Values.sentinel.containerPorts.sentinel }} + - name: redis + {{- if $.Values.sentinel.service.nodePorts.redis }} + nodePort: {{ (add $.Values.sentinel.service.nodePorts.redis $i 1) }} + port: {{ (add $.Values.sentinel.service.nodePorts.redis $i 1) }} + {{- else }} + nodePort: {{ $redisport }} + port: {{ $redisport }} + {{- end }} + protocol: TCP + targetPort: {{ $.Values.replica.containerPorts.redis }} + - name: sentinel-internal + nodePort: null + port: {{ $.Values.sentinel.containerPorts.sentinel }} + protocol: TCP + targetPort: {{ $.Values.sentinel.containerPorts.sentinel }} + - name: redis-internal + nodePort: null + port: {{ $.Values.replica.containerPorts.redis }} + protocol: TCP + targetPort: {{ $.Values.replica.containerPorts.redis }} + selector: + statefulset.kubernetes.io/pod-name: {{ template "common.names.fullname" $ }}-node-{{ $i }} +--- +{{- end }} +{{- end }} diff --git a/charts/modules/redis/templates/sentinel/ports-configmap.yaml b/charts/modules/redis/templates/sentinel/ports-configmap.yaml new file mode 100644 index 0000000..f5e7b2a --- /dev/null 
+++ b/charts/modules/redis/templates/sentinel/ports-configmap.yaml @@ -0,0 +1,100 @@ +{{- if and (eq .Values.architecture "replication") .Values.sentinel.enabled (eq .Values.sentinel.service.type "NodePort") (not .Values.sentinel.service.nodePorts.redis ) }} +{{- /* create a list to keep track of ports we choose to use */}} +{{ $chosenports := (list ) }} + +{{- /* Get list of all used nodeports */}} +{{ $usedports := (list ) }} +{{- range $index, $service := (lookup "v1" "Service" "" "").items }} + {{- range.spec.ports }} + {{- if .nodePort }} + {{- $usedports = (append $usedports .nodePort) }} + {{- end }} + {{- end }} +{{- end }} + +{{- /* +comments that start with # are rendered in the output when you debug, so you can less and search for them +Vars in the comment will be rendered out, so you can check their value this way. +https://helm.sh/docs/chart_best_practices/templates/#comments-yaml-comments-vs-template-comments + +remove the template comments and leave the yaml comments to help debug +*/}} + +{{- /* Sort the list */}} +{{ $usedports = $usedports | sortAlpha }} +#usedports {{ $usedports }} + +{{- /* How many nodeports per service do we want to create, except for the main service which is always two */}} +{{ $numberofPortsPerNodeService := 2 }} + +{{- /* for every nodeport we want, loop though the used ports to get an unused port */}} +{{- range $j := until (int (add (mul (int .Values.replica.replicaCount) $numberofPortsPerNodeService) 2)) }} + {{- /* #j={{ $j }} */}} + {{- $nodeport := (add $j 30000) }} + {{- $nodeportfound := false }} + {{- range $i := $usedports }} + {{- /* #i={{ $i }} + #nodeport={{ $nodeport }} + #usedports={{ $usedports }} */}} + {{- if and (has (toString $nodeport) $usedports) (eq $nodeportfound false) }} + {{- /* nodeport conflicts with in use */}} + {{- $nodeport = (add $nodeport 1) }} + {{- else if and ( has $nodeport $chosenports) (eq $nodeportfound false) }} + {{- /* nodeport already chosen, try another */}} + {{- $nodeport = (add $nodeport 1) }} + {{- else if (eq $nodeportfound false) }} + {{- /* nodeport free to use: not already claimed and not in use */}} + {{- /* select nodeport, and place into usedports */}} + {{- $chosenports = (append $chosenports $nodeport) }} + {{- $nodeportfound = true }} + {{- else }} + {{- /* nodeport has already been chosen and locked in, just work through the rest of the list to get to the next nodeport selection */}} + {{- end }} + {{- end }} + {{- if (eq $nodeportfound false) }} + {{- $chosenports = (append $chosenports $nodeport) }} + {{- end }} + +{{- end }} + +{{- /* print the usedports and chosenports for debugging */}} +#usedports {{ $usedports }} +#chosenports {{ $chosenports }}}} + +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "common.names.fullname" . }}-ports-configmap + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: +{{ $portsmap := (lookup "v1" "ConfigMap" $.Release.Namespace (printf "%s-%s" ( include "common.names.fullname" . 
) "ports-configmap")).data }} +{{- if $portsmap }} +{{- /* configmap already exists, do not install again */ -}} + {{- range $name, $value := $portsmap }} + "{{ $name }}": "{{ $value }}" + {{- end }} +{{- else }} +{{- /* configmap being set for first time */ -}} + {{- range $index, $port := $chosenports }} + {{- $nodenumber := (floor (div $index 2)) }} + {{- if (eq $index 0) }} + "{{ template "common.names.fullname" $ }}-sentinel": "{{ $port }}" + {{- else if (eq $index 1) }} + "{{ template "common.names.fullname" $ }}-redis": "{{ $port }}" + {{- else if (eq (mod $index 2) 0) }} + "{{ template "common.names.fullname" $ }}-node-{{ (sub $nodenumber 1) }}-sentinel": "{{ $port }}" + {{- else if (eq (mod $index 2) 1) }} + "{{ template "common.names.fullname" $ }}-node-{{ (sub $nodenumber 1) }}-redis": "{{ $port }}" + {{- end }} + {{- end }} +{{- end }} +{{- end }} diff --git a/charts/modules/redis/templates/sentinel/service.yaml b/charts/modules/redis/templates/sentinel/service.yaml new file mode 100644 index 0000000..362d681 --- /dev/null +++ b/charts/modules/redis/templates/sentinel/service.yaml @@ -0,0 +1,103 @@ +{{- if or .Release.IsUpgrade (ne .Values.sentinel.service.type "NodePort") .Values.sentinel.service.nodePorts.redis -}} +{{- if and (eq .Values.architecture "replication") .Values.sentinel.enabled }} +{{ $portsmap := (lookup "v1" "ConfigMap" $.Release.Namespace (printf "%s-%s" ( include "common.names.fullname" . ) "ports-configmap")).data }} + +{{ $sentinelport := 0}} +{{ $redisport := 0}} +{{- if $portsmap }} +{{ $sentinelport = index $portsmap (printf "%s-%s" (include "common.names.fullname" $) "sentinel") }} +{{ $redisport = index $portsmap (printf "%s-%s" (include "common.names.fullname" $) "redis") }} +{{- else }} +{{- end }} + +apiVersion: v1 +kind: Service +metadata: + name: {{ template "common.names.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + app.kubernetes.io/component: node + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if or .Values.sentinel.service.annotations .Values.commonAnnotations }} + annotations: + {{- if .Values.sentinel.service.annotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.sentinel.service.annotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} +spec: + type: {{ .Values.sentinel.service.type }} + {{- if or (eq .Values.sentinel.service.type "LoadBalancer") (eq .Values.sentinel.service.type "NodePort") }} + externalTrafficPolicy: {{ .Values.sentinel.service.externalTrafficPolicy | quote }} + {{- end }} + {{- if and (eq .Values.sentinel.service.type "LoadBalancer") (not (empty .Values.sentinel.service.loadBalancerIP)) }} + loadBalancerIP: {{ .Values.sentinel.service.loadBalancerIP }} + {{- end }} + {{- if and (eq .Values.sentinel.service.type "LoadBalancer") (not (empty .Values.sentinel.service.loadBalancerSourceRanges)) }} + loadBalancerSourceRanges: {{ .Values.sentinel.service.loadBalancerSourceRanges }} + {{- end }} + {{- if and .Values.sentinel.service.clusterIP (eq .Values.sentinel.service.type "ClusterIP") }} + clusterIP: {{ .Values.sentinel.service.clusterIP }} + {{- end }} + {{- if .Values.sentinel.service.sessionAffinity }} + sessionAffinity: {{ .Values.sentinel.service.sessionAffinity }} + {{- end }} + {{- if .Values.sentinel.service.sessionAffinityConfig }} + sessionAffinityConfig: {{- include "common.tplvalues.render" (dict "value" .Values.sentinel.service.sessionAffinityConfig "context" $) | nindent 4 }} + {{- end }} + ports: + - name: tcp-redis + {{- if and (or (eq .Values.sentinel.service.type "NodePort") (eq .Values.sentinel.service.type "LoadBalancer")) .Values.sentinel.service.nodePorts.redis }} + port: {{ .Values.sentinel.service.nodePorts.redis }} + {{- else if eq .Values.sentinel.service.type "NodePort" }} + port: {{ $redisport }} + {{- else}} + port: {{ .Values.sentinel.service.ports.redis }} + {{- end }} + targetPort: {{ .Values.replica.containerPorts.redis }} + {{- if and (or (eq .Values.sentinel.service.type "NodePort") (eq .Values.sentinel.service.type "LoadBalancer")) .Values.sentinel.service.nodePorts.redis }} + nodePort: {{ .Values.sentinel.service.nodePorts.redis }} + {{- else if eq .Values.sentinel.service.type "ClusterIP" }} + nodePort: null + {{- else if eq .Values.sentinel.service.type "NodePort" }} + nodePort: {{ $redisport }} + {{- end }} + - name: tcp-sentinel + {{- if and (or (eq .Values.sentinel.service.type "NodePort") (eq .Values.sentinel.service.type "LoadBalancer")) .Values.sentinel.service.nodePorts.sentinel }} + port: {{ .Values.sentinel.service.nodePorts.sentinel }} + {{- else if eq .Values.sentinel.service.type "NodePort" }} + port: {{ $sentinelport }} + {{- else }} + port: {{ .Values.sentinel.service.ports.sentinel }} + {{- end }} + targetPort: {{ .Values.sentinel.containerPorts.sentinel }} + {{- if and (or (eq .Values.sentinel.service.type "NodePort") (eq .Values.sentinel.service.type "LoadBalancer")) .Values.sentinel.service.nodePorts.sentinel }} + nodePort: {{ .Values.sentinel.service.nodePorts.sentinel }} + {{- else if eq .Values.sentinel.service.type "ClusterIP" }} + nodePort: null + {{- else if eq .Values.sentinel.service.type "NodePort" }} 
+ nodePort: {{ $sentinelport }} + {{- end }} + {{- if eq .Values.sentinel.service.type "NodePort" }} + - name: sentinel-internal + nodePort: null + port: {{ .Values.sentinel.containerPorts.sentinel }} + protocol: TCP + targetPort: {{ .Values.sentinel.containerPorts.sentinel }} + - name: redis-internal + nodePort: null + port: {{ .Values.replica.containerPorts.redis }} + protocol: TCP + targetPort: {{ .Values.replica.containerPorts.redis }} + {{- end }} + {{- if .Values.sentinel.service.extraPorts }} + {{- include "common.tplvalues.render" (dict "value" .Values.sentinel.service.extraPorts "context" $) | nindent 4 }} + {{- end }} + selector: {{- include "common.labels.matchLabels" . | nindent 4 }} + app.kubernetes.io/component: node +{{- end }} +{{- end }} diff --git a/charts/modules/redis/templates/sentinel/statefulset.yaml b/charts/modules/redis/templates/sentinel/statefulset.yaml new file mode 100644 index 0000000..dda8a12 --- /dev/null +++ b/charts/modules/redis/templates/sentinel/statefulset.yaml @@ -0,0 +1,710 @@ +{{- if or .Release.IsUpgrade (ne .Values.sentinel.service.type "NodePort") .Values.sentinel.service.nodePorts.redis -}} +{{- if and (eq .Values.architecture "replication") .Values.sentinel.enabled }} +apiVersion: {{ include "common.capabilities.statefulset.apiVersion" . }} +kind: StatefulSet +metadata: + name: {{ printf "%s-node" (include "common.names.fullname" .) }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: node + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if or .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + replicas: {{ .Values.replica.replicaCount }} + selector: + matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }} + app.kubernetes.io/component: node + serviceName: {{ printf "%s-headless" (include "common.names.fullname" .) }} + {{- if .Values.replica.updateStrategy }} + updateStrategy: {{- toYaml .Values.replica.updateStrategy | nindent 4 }} + {{- end }} + {{- if .Values.replica.podManagementPolicy }} + podManagementPolicy: {{ .Values.replica.podManagementPolicy | quote }} + {{- end }} + template: + metadata: + labels: {{- include "common.labels.standard" . | nindent 8 }} + app.kubernetes.io/component: node + {{- if .Values.replica.podLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.replica.podLabels "context" $ ) | nindent 8 }} + {{- end }} + {{- if and .Values.metrics.enabled .Values.metrics.podLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.podLabels "context" $ ) | nindent 8 }} + {{- end }} + annotations: + {{- if (include "redis.createConfigmap" .) }} + checksum/configmap: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} + {{- end }} + checksum/health: {{ include (print $.Template.BasePath "/health-configmap.yaml") . | sha256sum }} + checksum/scripts: {{ include (print $.Template.BasePath "/scripts-configmap.yaml") . | sha256sum }} + checksum/secret: {{ include (print $.Template.BasePath "/secret.yaml") . 
| sha256sum }} + {{- if .Values.replica.podAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.replica.podAnnotations "context" $ ) | nindent 8 }} + {{- end }} + {{- if and .Values.metrics.enabled .Values.metrics.podAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.podAnnotations "context" $ ) | nindent 8 }} + {{- end }} + spec: + {{- include "redis.imagePullSecrets" . | nindent 6 }} + {{- if .Values.replica.hostAliases }} + hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.replica.hostAliases "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.replica.podSecurityContext.enabled }} + securityContext: {{- omit .Values.replica.podSecurityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + serviceAccountName: {{ template "redis.serviceAccountName" . }} + {{- if .Values.replica.priorityClassName }} + priorityClassName: {{ .Values.replica.priorityClassName | quote }} + {{- end }} + {{- if .Values.replica.affinity }} + affinity: {{- include "common.tplvalues.render" (dict "value" .Values.replica.affinity "context" $) | nindent 8 }} + {{- else }} + affinity: + podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.replica.podAffinityPreset "component" "node" "context" $) | nindent 10 }} + podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.replica.podAntiAffinityPreset "component" "node" "context" $) | nindent 10 }} + nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.replica.nodeAffinityPreset.type "key" .Values.replica.nodeAffinityPreset.key "values" .Values.replica.nodeAffinityPreset.values) | nindent 10 }} + {{- end }} + {{- if .Values.replica.nodeSelector }} + nodeSelector: {{- include "common.tplvalues.render" (dict "value" .Values.replica.nodeSelector "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.replica.tolerations }} + tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.replica.tolerations "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.replica.topologySpreadConstraints }} + topologySpreadConstraints: {{- include "common.tplvalues.render" (dict "value" .Values.replica.topologySpreadConstraints "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.replica.shareProcessNamespace }} + shareProcessNamespace: {{ .Values.replica.shareProcessNamespace }} + {{- end }} + {{- if .Values.replica.schedulerName }} + schedulerName: {{ .Values.replica.schedulerName | quote }} + {{- end }} + {{- if .Values.replica.dnsPolicy }} + dnsPolicy: {{ .Values.replica.dnsPolicy }} + {{- end }} + {{- if .Values.replica.dnsConfig }} + dnsConfig: {{- include "common.tplvalues.render" (dict "value" .Values.replica.dnsConfig "context" $) | nindent 8 }} + {{- end }} + terminationGracePeriodSeconds: {{ .Values.sentinel.terminationGracePeriodSeconds }} + containers: + - name: redis + image: {{ template "redis.image" . 
}} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if not .Values.diagnosticMode.enabled }} + {{- if .Values.replica.lifecycleHooks }} + lifecycle: {{- include "common.tplvalues.render" (dict "value" .Values.replica.lifecycleHooks "context" $) | nindent 12 }} + {{- else }} + lifecycle: + preStop: + exec: + command: + - /bin/bash + - -c + - /opt/bitnami/scripts/start-scripts/prestop-redis.sh + {{- end }} + {{- end }} + {{- if .Values.replica.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.replica.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + {{- else if .Values.replica.command }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.replica.command "context" $) | nindent 12 }} + {{- else }} + command: + - /bin/bash + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- else if .Values.replica.args }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.replica.args "context" $) | nindent 12 }} + {{- else }} + args: + - -c + - /opt/bitnami/scripts/start-scripts/start-node.sh + {{- end }} + env: + - name: BITNAMI_DEBUG + value: {{ ternary "true" "false" (or .Values.image.debug .Values.diagnosticMode.enabled) | quote }} + - name: REDIS_MASTER_PORT_NUMBER + value: {{ .Values.replica.containerPorts.redis | quote }} + - name: ALLOW_EMPTY_PASSWORD + value: {{ ternary "no" "yes" .Values.auth.enabled | quote }} + {{- if .Values.auth.enabled }} + {{- if .Values.auth.usePasswordFiles }} + - name: REDIS_PASSWORD_FILE + value: "/opt/bitnami/redis/secrets/redis-password" + - name: REDIS_MASTER_PASSWORD_FILE + value: "/opt/bitnami/redis/secrets/redis-password" + {{- else }} + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . }} + - name: REDIS_MASTER_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . }} + {{- end }} + {{- end }} + - name: REDIS_TLS_ENABLED + value: {{ ternary "yes" "no" .Values.tls.enabled | quote }} + {{- if .Values.tls.enabled }} + - name: REDIS_TLS_PORT + value: {{ .Values.replica.containerPorts.redis | quote }} + - name: REDIS_TLS_AUTH_CLIENTS + value: {{ ternary "yes" "no" .Values.tls.authClients | quote }} + - name: REDIS_TLS_CERT_FILE + value: {{ template "redis.tlsCert" . }} + - name: REDIS_TLS_KEY_FILE + value: {{ template "redis.tlsCertKey" . }} + - name: REDIS_TLS_CA_FILE + value: {{ template "redis.tlsCACert" . }} + {{- if .Values.tls.dhParamsFilename }} + - name: REDIS_TLS_DH_PARAMS_FILE + value: {{ template "redis.tlsDHParams" . 
}} + {{- end }} + {{- else }} + - name: REDIS_PORT + value: {{ .Values.replica.containerPorts.redis | quote }} + {{- end }} + - name: REDIS_DATA_DIR + value: {{ .Values.replica.persistence.path }} + {{- if .Values.replica.externalMaster.enabled }} + - name: REDIS_EXTERNAL_MASTER_HOST + value: {{ .Values.replica.externalMaster.host | quote }} + - name: REDIS_EXTERNAL_MASTER_PORT + value: {{ .Values.replica.externalMaster.port | quote }} + {{- end }} + {{- if .Values.replica.extraEnvVars }} + {{- include "common.tplvalues.render" ( dict "value" .Values.replica.extraEnvVars "context" $ ) | nindent 12 }} + {{- end }} + {{- if or .Values.replica.extraEnvVarsCM .Values.replica.extraEnvVarsSecret }} + envFrom: + {{- if .Values.replica.extraEnvVarsCM }} + - configMapRef: + name: {{ .Values.replica.extraEnvVarsCM }} + {{- end }} + {{- if .Values.replica.extraEnvVarsSecret }} + - secretRef: + name: {{ .Values.replica.extraEnvVarsSecret }} + {{- end }} + {{- end }} + ports: + - name: redis + containerPort: {{ .Values.replica.containerPorts.redis }} + {{- if not .Values.diagnosticMode.enabled }} + {{- if .Values.replica.customStartupProbe }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.replica.customStartupProbe "context" $) | nindent 12 }} + {{- else if .Values.replica.startupProbe.enabled }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.replica.startupProbe "enabled") "context" $) | nindent 12 }} + exec: + command: + - sh + - -c + - /health/ping_liveness_local.sh {{ .Values.replica.livenessProbe.timeoutSeconds }} + {{- end }} + {{- if .Values.replica.customLivenessProbe }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.replica.customLivenessProbe "context" $) | nindent 12 }} + {{- else if .Values.replica.livenessProbe.enabled }} + livenessProbe: + initialDelaySeconds: {{ .Values.replica.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.replica.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.replica.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.replica.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.replica.livenessProbe.failureThreshold }} + exec: + command: + - sh + - -c + - /health/ping_liveness_local.sh {{ .Values.replica.livenessProbe.timeoutSeconds }} + {{- end }} + {{- if .Values.replica.customReadinessProbe }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.replica.customReadinessProbe "context" $) | nindent 12 }} + {{- else if .Values.replica.readinessProbe.enabled }} + readinessProbe: + initialDelaySeconds: {{ .Values.replica.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.replica.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.replica.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.replica.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.replica.readinessProbe.failureThreshold }} + exec: + command: + - sh + - -c + - /health/ping_readiness_local.sh {{ .Values.replica.readinessProbe.timeoutSeconds }} + {{- end }} + {{- end }} + {{- if .Values.replica.resources }} + resources: {{- toYaml .Values.replica.resources | nindent 12 }} + {{- end }} + volumeMounts: + - name: start-scripts + mountPath: /opt/bitnami/scripts/start-scripts + - name: health + mountPath: /health + {{- if .Values.sentinel.persistence.enabled }} + - name: sentinel-data + mountPath: /opt/bitnami/redis-sentinel/etc + {{- end }} + {{- if .Values.auth.usePasswordFiles }} + - name: 
redis-password + mountPath: /opt/bitnami/redis/secrets/ + {{- end }} + - name: redis-data + mountPath: {{ .Values.replica.persistence.path }} + {{- if .Values.replica.persistence.subPath }} + subPath: {{ .Values.replica.persistence.subPath }} + {{- else if .Values.replica.persistence.subPathExpr }} + subPathExpr: {{ .Values.replica.persistence.subPathExpr }} + {{- end }} + - name: config + mountPath: /opt/bitnami/redis/mounted-etc + - name: redis-tmp-conf + mountPath: /opt/bitnami/redis/etc + - name: tmp + mountPath: /tmp + {{- if .Values.tls.enabled }} + - name: redis-certificates + mountPath: /opt/bitnami/redis/certs + readOnly: true + {{- end }} + {{- if .Values.replica.extraVolumeMounts }} + {{- include "common.tplvalues.render" ( dict "value" .Values.replica.extraVolumeMounts "context" $ ) | nindent 12 }} + {{- end }} + - name: sentinel + image: {{ template "redis.sentinel.image" . }} + imagePullPolicy: {{ .Values.sentinel.image.pullPolicy | quote }} + {{- if not .Values.diagnosticMode.enabled }} + {{- if .Values.sentinel.lifecycleHooks }} + lifecycle: {{- include "common.tplvalues.render" (dict "value" .Values.sentinel.lifecycleHooks "context" $) | nindent 12 }} + {{- else }} + lifecycle: + preStop: + exec: + command: + - /bin/bash + - -c + - /opt/bitnami/scripts/start-scripts/prestop-sentinel.sh + {{- end }} + {{- end }} + {{- if .Values.sentinel.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.sentinel.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + {{- else if .Values.sentinel.command }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.sentinel.command "context" $) | nindent 12 }} + {{- else }} + command: + - /bin/bash + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- else if .Values.sentinel.args }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.sentinel.args "context" $) | nindent 12 }} + {{- else }} + args: + - -c + - /opt/bitnami/scripts/start-scripts/start-sentinel.sh + {{- end }} + env: + - name: BITNAMI_DEBUG + value: {{ ternary "true" "false" (or .Values.sentinel.image.debug .Values.diagnosticMode.enabled) | quote }} + {{- if .Values.auth.enabled }} + {{- if .Values.auth.usePasswordFiles }} + - name: REDIS_PASSWORD_FILE + value: "/opt/bitnami/redis/secrets/redis-password" + {{- else }} + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . }} + {{- end }} + {{- else }} + - name: ALLOW_EMPTY_PASSWORD + value: "yes" + {{- end }} + - name: REDIS_SENTINEL_TLS_ENABLED + value: {{ ternary "yes" "no" .Values.tls.enabled | quote }} + {{- if .Values.tls.enabled }} + - name: REDIS_SENTINEL_TLS_PORT_NUMBER + value: {{ .Values.sentinel.containerPorts.sentinel | quote }} + - name: REDIS_SENTINEL_TLS_AUTH_CLIENTS + value: {{ ternary "yes" "no" .Values.tls.authClients | quote }} + - name: REDIS_SENTINEL_TLS_CERT_FILE + value: {{ template "redis.tlsCert" . }} + - name: REDIS_SENTINEL_TLS_KEY_FILE + value: {{ template "redis.tlsCertKey" . }} + - name: REDIS_SENTINEL_TLS_CA_FILE + value: {{ template "redis.tlsCACert" . 
}} + {{- if .Values.tls.dhParamsFilename }} + - name: REDIS_SENTINEL_TLS_DH_PARAMS_FILE + value: {{ template "redis.tlsDHParams" . }} + {{- end }} + {{- else }} + - name: REDIS_SENTINEL_PORT + value: {{ .Values.sentinel.containerPorts.sentinel | quote }} + {{- end }} + {{- if .Values.sentinel.externalMaster.enabled }} + - name: REDIS_EXTERNAL_MASTER_HOST + value: {{ .Values.sentinel.externalMaster.host | quote }} + - name: REDIS_EXTERNAL_MASTER_PORT + value: {{ .Values.sentinel.externalMaster.port | quote }} + {{- end }} + {{- if .Values.sentinel.extraEnvVars }} + {{- include "common.tplvalues.render" ( dict "value" .Values.sentinel.extraEnvVars "context" $ ) | nindent 12 }} + {{- end }} + {{- if or .Values.sentinel.extraEnvVarsCM .Values.sentinel.extraEnvVarsSecret }} + envFrom: + {{- if .Values.sentinel.extraEnvVarsCM }} + - configMapRef: + name: {{ .Values.sentinel.extraEnvVarsCM }} + {{- end }} + {{- if .Values.sentinel.extraEnvVarsSecret }} + - secretRef: + name: {{ .Values.sentinel.extraEnvVarsSecret }} + {{- end }} + {{- end }} + ports: + - name: redis-sentinel + containerPort: {{ .Values.sentinel.containerPorts.sentinel }} + {{- if not .Values.diagnosticMode.enabled }} + {{- if .Values.sentinel.customStartupProbe }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.sentinel.customStartupProbe "context" $) | nindent 12 }} + {{- else if .Values.sentinel.startupProbe.enabled }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.sentinel.startupProbe "enabled") "context" $) | nindent 12 }} + exec: + command: + - sh + - -c + - /health/ping_sentinel.sh {{ .Values.sentinel.livenessProbe.timeoutSeconds }} + {{- end }} + {{- if .Values.sentinel.customLivenessProbe }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.sentinel.customLivenessProbe "context" $) | nindent 12 }} + {{- else if .Values.sentinel.livenessProbe.enabled }} + livenessProbe: + initialDelaySeconds: {{ .Values.sentinel.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.sentinel.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.sentinel.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.sentinel.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.sentinel.livenessProbe.failureThreshold }} + exec: + command: + - sh + - -c + - /health/ping_sentinel.sh {{ .Values.sentinel.livenessProbe.timeoutSeconds }} + {{- end }} + {{- end }} + {{- if not .Values.diagnosticMode.enabled }} + {{- if .Values.sentinel.customReadinessProbe }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.sentinel.customReadinessProbe "context" $) | nindent 12 }} + {{- else if .Values.sentinel.readinessProbe.enabled }} + readinessProbe: + initialDelaySeconds: {{ .Values.sentinel.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.sentinel.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.sentinel.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.sentinel.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.sentinel.readinessProbe.failureThreshold }} + exec: + command: + - sh + - -c + - /health/ping_sentinel.sh {{ .Values.sentinel.readinessProbe.timeoutSeconds }} + {{- end }} + {{- end }} + {{- if .Values.sentinel.resources }} + resources: {{- toYaml .Values.sentinel.resources | nindent 12 }} + {{- end }} + volumeMounts: + - name: start-scripts + mountPath: /opt/bitnami/scripts/start-scripts + - name: health + mountPath: /health + - name: 
sentinel-data + mountPath: /opt/bitnami/redis-sentinel/etc + {{- if .Values.auth.usePasswordFiles }} + - name: redis-password + mountPath: /opt/bitnami/redis/secrets/ + {{- end }} + - name: redis-data + mountPath: {{ .Values.replica.persistence.path }} + {{- if .Values.replica.persistence.subPath }} + subPath: {{ .Values.replica.persistence.subPath }} + {{- else if .Values.replica.persistence.subPathExpr }} + subPathExpr: {{ .Values.replica.persistence.subPathExpr }} + {{- end }} + - name: config + mountPath: /opt/bitnami/redis-sentinel/mounted-etc + {{- if .Values.tls.enabled }} + - name: redis-certificates + mountPath: /opt/bitnami/redis/certs + readOnly: true + {{- end }} + {{- if .Values.sentinel.extraVolumeMounts }} + {{- include "common.tplvalues.render" ( dict "value" .Values.sentinel.extraVolumeMounts "context" $ ) | nindent 12 }} + {{- end }} + {{- if .Values.metrics.enabled }} + - name: metrics + image: {{ template "redis.metrics.image" . }} + imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }} + {{- if .Values.metrics.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.metrics.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + {{- else }} + command: + - /bin/bash + - -c + - | + if [[ -f '/secrets/redis-password' ]]; then + export REDIS_PASSWORD=$(cat /secrets/redis-password) + fi + redis_exporter{{- range $key, $value := .Values.metrics.extraArgs }} --{{ $key }}={{ $value }}{{- end }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- end }} + env: + - name: REDIS_ALIAS + value: {{ template "common.names.fullname" . }} + {{- if .Values.auth.enabled }} + - name: REDIS_USER + value: default + {{- if (not .Values.auth.usePasswordFiles) }} + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . }} + {{- end }} + {{- end }} + {{- if .Values.tls.enabled }} + - name: REDIS_ADDR + value: rediss://{{ .Values.metrics.redisTargetHost }}:{{ .Values.replica.containerPorts.redis }} + {{- if .Values.tls.authClients }} + - name: REDIS_EXPORTER_TLS_CLIENT_KEY_FILE + value: {{ template "redis.tlsCertKey" . }} + - name: REDIS_EXPORTER_TLS_CLIENT_CERT_FILE + value: {{ template "redis.tlsCert" . }} + {{- end }} + - name: REDIS_EXPORTER_TLS_CA_CERT_FILE + value: {{ template "redis.tlsCACert" . 
}} + {{- end }} + {{- if .Values.metrics.extraEnvVars }} + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + ports: + - name: metrics + containerPort: 9121 + {{- if .Values.metrics.resources }} + resources: {{- toYaml .Values.metrics.resources | nindent 12 }} + {{- end }} + volumeMounts: + {{- if .Values.auth.usePasswordFiles }} + - name: redis-password + mountPath: /secrets/ + {{- end }} + {{- if .Values.tls.enabled }} + - name: redis-certificates + mountPath: /opt/bitnami/redis/certs + readOnly: true + {{- end }} + {{- if .Values.metrics.extraVolumeMounts }} + {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.extraVolumeMounts "context" $ ) | nindent 12 }} + {{- end }} + {{- end }} + {{- if .Values.replica.sidecars }} + {{- include "common.tplvalues.render" (dict "value" .Values.replica.sidecars "context" $) | nindent 8 }} + {{- end }} + {{- $needsVolumePermissions := and .Values.volumePermissions.enabled .Values.replica.persistence.enabled .Values.replica.podSecurityContext.enabled .Values.replica.containerSecurityContext.enabled }} + {{- if or .Values.replica.initContainers $needsVolumePermissions .Values.sysctl.enabled }} + initContainers: + {{- if .Values.replica.initContainers }} + {{- include "common.tplvalues.render" (dict "value" .Values.replica.initContainers "context" $) | nindent 8 }} + {{- end }} + {{- if $needsVolumePermissions }} + - name: volume-permissions + image: {{ include "redis.volumePermissions.image" . }} + imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} + command: + - /bin/bash + - -ec + - | + {{- if eq ( toString ( .Values.volumePermissions.containerSecurityContext.runAsUser )) "auto" }} + chown -R `id -u`:`id -G | cut -d " " -f2` {{ .Values.replica.persistence.path }} + {{- else }} + chown -R {{ .Values.replica.containerSecurityContext.runAsUser }}:{{ .Values.replica.podSecurityContext.fsGroup }} {{ .Values.replica.persistence.path }} + {{- end }} + {{- if eq ( toString ( .Values.volumePermissions.containerSecurityContext.runAsUser )) "auto" }} + securityContext: {{- omit .Values.volumePermissions.containerSecurityContext "runAsUser" | toYaml | nindent 12 }} + {{- else }} + securityContext: {{- .Values.volumePermissions.containerSecurityContext | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.volumePermissions.resources }} + resources: {{- toYaml .Values.volumePermissions.resources | nindent 12 }} + {{- end }} + volumeMounts: + - name: redis-data + mountPath: {{ .Values.replica.persistence.path }} + {{- if .Values.replica.persistence.subPath }} + subPath: {{ .Values.replica.persistence.subPath }} + {{- else if .Values.replica.persistence.subPathExpr }} + subPathExpr: {{ .Values.replica.persistence.subPathExpr }} + {{- end }} + {{- end }} + {{- if .Values.sysctl.enabled }} + - name: init-sysctl + image: {{ include "redis.sysctl.image" . 
}} + imagePullPolicy: {{ default "" .Values.sysctl.image.pullPolicy | quote }} + securityContext: + privileged: true + runAsUser: 0 + {{- if .Values.sysctl.command }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.sysctl.command "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.sysctl.resources }} + resources: {{- toYaml .Values.sysctl.resources | nindent 12 }} + {{- end }} + {{- if .Values.sysctl.mountHostSys }} + volumeMounts: + - name: host-sys + mountPath: /host-sys + {{- end }} + {{- end }} + {{- end }} + volumes: + - name: start-scripts + configMap: + name: {{ printf "%s-scripts" (include "common.names.fullname" .) }} + defaultMode: 0755 + - name: health + configMap: + name: {{ printf "%s-health" (include "common.names.fullname" .) }} + defaultMode: 0755 + {{- if .Values.auth.usePasswordFiles }} + - name: redis-password + secret: + secretName: {{ template "redis.secretName" . }} + items: + - key: {{ template "redis.secretPasswordKey" . }} + path: redis-password + {{- end }} + - name: config + configMap: + name: {{ include "redis.configmapName" . }} + {{- if .Values.sysctl.mountHostSys }} + - name: host-sys + hostPath: + path: /sys + {{- end }} + {{- if not .Values.sentinel.persistence.enabled }} + - name: sentinel-data + {{- if .Values.sentinel.persistence.medium }} + emptyDir: { + medium: {{ .Values.sentinel.persistence.medium | quote }} + } + {{- else }} + emptyDir: {} + {{- end }} + {{- end }} + - name: redis-tmp-conf + {{- if .Values.replica.persistence.medium }} + emptyDir: { + medium: {{ .Values.replica.persistence.medium | quote }} + } + {{- else }} + emptyDir: {} + {{- end }} + - name: tmp + {{- if .Values.replica.persistence.medium }} + emptyDir: { + medium: {{ .Values.replica.persistence.medium | quote }} + } + {{- else }} + emptyDir: {} + {{- end }} + {{- if .Values.replica.extraVolumes }} + {{- include "common.tplvalues.render" ( dict "value" .Values.replica.extraVolumes "context" $ ) | nindent 8 }} + {{- end }} + {{- if .Values.metrics.extraVolumes }} + {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.extraVolumes "context" $ ) | nindent 8 }} + {{- end }} + {{- if .Values.sentinel.extraVolumes }} + {{- include "common.tplvalues.render" ( dict "value" .Values.sentinel.extraVolumes "context" $ ) | nindent 8 }} + {{- end }} + {{- if .Values.tls.enabled }} + - name: redis-certificates + secret: + secretName: {{ include "redis.tlsSecretName" . }} + defaultMode: 256 + {{- end }} + {{- if not .Values.replica.persistence.enabled }} + - name: redis-data + {{- if .Values.replica.persistence.medium }} + emptyDir: { + medium: {{ .Values.replica.persistence.medium | quote }} + } + {{- else }} + emptyDir: {} + {{- end }} + {{- else if .Values.replica.persistence.existingClaim }} + - name: redis-data + persistentVolumeClaim: + claimName: {{ printf "%s" (tpl .Values.replica.persistence.existingClaim .) }} + {{- else }} + volumeClaimTemplates: + - metadata: + name: redis-data + labels: {{- include "common.labels.matchLabels" . | nindent 10 }} + app.kubernetes.io/component: node + {{- if .Values.replica.persistence.annotations }} + annotations: {{- toYaml .Values.replica.persistence.annotations | nindent 10 }} + {{- end }} + spec: + accessModes: + {{- range .Values.replica.persistence.accessModes }} + - {{ . 
| quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.replica.persistence.size | quote }} + {{- if .Values.replica.persistence.selector }} + selector: {{- include "common.tplvalues.render" ( dict "value" .Values.replica.persistence.selector "context" $) | nindent 10 }} + {{- end }} + {{- include "common.storage.class" (dict "persistence" .Values.replica.persistence "global" .Values.global) | nindent 8 }} + {{- if .Values.sentinel.persistence.enabled }} + - metadata: + name: sentinel-data + labels: {{- include "common.labels.matchLabels" . | nindent 10 }} + app.kubernetes.io/component: node + {{- if .Values.sentinel.persistence.annotations }} + annotations: {{- toYaml .Values.sentinel.persistence.annotations | nindent 10 }} + {{- end }} + spec: + accessModes: + {{- range .Values.sentinel.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.sentinel.persistence.size | quote }} + {{- if .Values.sentinel.persistence.selector }} + selector: {{- include "common.tplvalues.render" ( dict "value" .Values.sentinel.persistence.selector "context" $) | nindent 10 }} + {{- end }} + {{- if .Values.sentinel.persistence.dataSource }} + dataSource: {{- include "common.tplvalues.render" (dict "value" .Values.sentinel.persistence.dataSource "context" $) | nindent 10 }} + {{- end }} + {{- include "common.storage.class" (dict "persistence" .Values.sentinel.persistence "global" .Values.global) | nindent 8 }} + {{- end }} + {{- end }} +{{- end }} +{{- end }} diff --git a/charts/modules/redis/templates/serviceaccount.yaml b/charts/modules/redis/templates/serviceaccount.yaml new file mode 100644 index 0000000..9faa175 --- /dev/null +++ b/charts/modules/redis/templates/serviceaccount.yaml @@ -0,0 +1,21 @@ +{{- if and .Values.serviceAccount.create (and (not .Values.master.serviceAccount.create) (not .Values.replica.serviceAccount.create)) }} +apiVersion: v1 +kind: ServiceAccount +automountServiceAccountToken: {{ .Values.serviceAccount.automountServiceAccountToken }} +metadata: + name: {{ template "redis.serviceAccountName" . }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if or .Values.commonAnnotations .Values.serviceAccount.annotations }} + annotations: + {{- if or .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.serviceAccount.annotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.serviceAccount.annotations "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} +{{- end }} \ No newline at end of file diff --git a/charts/modules/redis/templates/servicemonitor.yaml b/charts/modules/redis/templates/servicemonitor.yaml new file mode 100644 index 0000000..9bdad94 --- /dev/null +++ b/charts/modules/redis/templates/servicemonitor.yaml @@ -0,0 +1,44 @@ +{{- if and .Values.metrics.enabled .Values.metrics.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template "common.names.fullname" . }} + namespace: {{ default .Release.Namespace .Values.metrics.serviceMonitor.namespace | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + {{- if .Values.metrics.serviceMonitor.additionalLabels }} + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.serviceMonitor.additionalLabels "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + endpoints: + - port: http-metrics + {{- if .Values.metrics.serviceMonitor.interval }} + interval: {{ .Values.metrics.serviceMonitor.interval }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.metrics.serviceMonitor.scrapeTimeout }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.honorLabels }} + honorLabels: {{ .Values.metrics.serviceMonitor.honorLabels }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.relabellings }} + relabelings: {{- toYaml .Values.metrics.serviceMonitor.relabellings | nindent 6 }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.metricRelabelings }} + metricRelabelings: {{- toYaml .Values.metrics.serviceMonitor.metricRelabelings | nindent 6 }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.podTargetLabels }} + podTargetLabels: {{- toYaml .Values.metrics.serviceMonitor.podTargetLabels | nindent 4 }} + {{- end }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace }} + selector: + matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }} + app.kubernetes.io/component: metrics +{{- end }} diff --git a/charts/modules/redis/templates/tls-secret.yaml b/charts/modules/redis/templates/tls-secret.yaml new file mode 100644 index 0000000..4f9c39b --- /dev/null +++ b/charts/modules/redis/templates/tls-secret.yaml @@ -0,0 +1,30 @@ +{{- if (include "redis.createTlsSecret" .) }} +{{- $secretName := printf "%s-crt" (include "common.names.fullname" .) }} +{{- $existingCerts := (lookup "v1" "Secret" .Release.Namespace $secretName).data | default dict }} +{{- $ca := genCA "redis-ca" 365 }} +{{- $releaseNamespace := .Release.Namespace }} +{{- $clusterDomain := .Values.clusterDomain }} +{{- $fullname := include "common.names.fullname" . }} +{{- $serviceName := include "common.names.fullname" . }} +{{- $headlessServiceName := printf "%s-headless" (include "common.names.fullname" .) }} +{{- $masterServiceName := printf "%s-master" (include "common.names.fullname" .) }} +{{- $altNames := list (printf "*.%s.%s.svc.%s" $serviceName $releaseNamespace $clusterDomain) (printf "%s.%s.svc.%s" $masterServiceName $releaseNamespace $clusterDomain) (printf "*.%s.%s.svc.%s" $masterServiceName $releaseNamespace $clusterDomain) (printf "*.%s.%s.svc.%s" $headlessServiceName $releaseNamespace $clusterDomain) (printf "%s.%s.svc.%s" $headlessServiceName $releaseNamespace $clusterDomain) "127.0.0.1" "localhost" $fullname }} +{{- $crt := genSignedCert $fullname nil $altNames 365 $ca }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ $secretName }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: kubernetes.io/tls +data: + ca.crt: {{ (get $existingCerts "ca.crt") | default ($ca.Cert | b64enc | quote ) }} + tls.crt: {{ (get $existingCerts "tls.crt") | default ($crt.Cert | b64enc | quote) }} + tls.key: {{ (get $existingCerts "tls.key") | default ($crt.Key | b64enc | quote) }} +{{- end }} diff --git a/charts/modules/redis/values.schema.json b/charts/modules/redis/values.schema.json new file mode 100644 index 0000000..d6e226b --- /dev/null +++ b/charts/modules/redis/values.schema.json @@ -0,0 +1,156 @@ +{ + "$schema": "http://json-schema.org/schema#", + "type": "object", + "properties": { + "architecture": { + "type": "string", + "title": "Redis architecture", + "form": true, + "description": "Allowed values: `standalone` or `replication`", + "enum": ["standalone", "replication"] + }, + "auth": { + "type": "object", + "title": "Authentication configuration", + "form": true, + "properties": { + "enabled": { + "type": "boolean", + "form": true, + "title": "Use password authentication" + }, + "password": { + "type": "string", + "title": "Redis password", + "form": true, + "description": "Defaults to a random 10-character alphanumeric string if not set", + "hidden": { + "value": false, + "path": "auth/enabled" + } + } + } + }, + "master": { + "type": "object", + "title": "Master replicas settings", + "form": true, + "properties": { + "kind": { + "type": "string", + "title": "Workload Kind", + "form": true, + "description": "Allowed values: `Deployment` or `StatefulSet`", + "enum": ["Deployment", "StatefulSet"] + }, + "persistence": { + "type": "object", + "title": "Persistence for master replicas", + "form": true, + "properties": { + "enabled": { + "type": "boolean", + "form": true, + "title": "Enable persistence", + "description": "Enable persistence using Persistent Volume Claims" + }, + "size": { + "type": "string", + "title": "Persistent Volume Size", + "form": true, + "render": "slider", + "sliderMin": 1, + "sliderMax": 100, + "sliderUnit": "Gi", + "hidden": { + "value": false, + "path": "master/persistence/enabled" + } + } + } + } + } + }, + "replica": { + "type": "object", + "title": "Redis replicas settings", + "form": true, + "hidden": { + "value": "standalone", + "path": "architecture" + }, + "properties": { + "replicaCount": { + "type": "integer", + "form": true, + "title": "Number of Redis replicas" + }, + "persistence": { + "type": "object", + "title": "Persistence for Redis replicas", + "form": true, + "properties": { + "enabled": { + "type": "boolean", + "form": true, + "title": "Enable persistence", + "description": "Enable persistence using Persistent Volume Claims" + }, + "size": { + "type": "string", + "title": "Persistent Volume Size", + "form": true, + "render": "slider", + "sliderMin": 1, + "sliderMax": 100, + "sliderUnit": "Gi", + "hidden": { + "value": false, + "path": "replica/persistence/enabled" + } + } + } + } + } + }, + "volumePermissions": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean", + "form": true, + "title": "Enable Init Containers", + "description": "Use an init container to set required folder permissions on the data volume before mounting it in the final destination" + } + } + }, + 
"metrics": { + "type": "object", + "form": true, + "title": "Prometheus metrics details", + "properties": { + "enabled": { + "type": "boolean", + "title": "Create Prometheus metrics exporter", + "description": "Create a side-car container to expose Prometheus metrics", + "form": true + }, + "serviceMonitor": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean", + "title": "Create Prometheus Operator ServiceMonitor", + "description": "Create a ServiceMonitor to track metrics using Prometheus Operator", + "form": true, + "hidden": { + "value": false, + "path": "metrics/enabled" + } + } + } + } + } + } + } +} diff --git a/charts/modules/redis/values.yaml b/charts/modules/redis/values.yaml new file mode 100644 index 0000000..9359c0a --- /dev/null +++ b/charts/modules/redis/values.yaml @@ -0,0 +1,1673 @@ +## @section Global parameters +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry, imagePullSecrets and storageClass +## + +## @param global.imageRegistry Global Docker image registry +## @param global.imagePullSecrets Global Docker registry secret names as an array +## @param global.storageClass Global StorageClass for Persistent Volume(s) +## @param global.redis.password Global Redis® password (overrides `auth.password`) +## +global: + imageRegistry: "" + ## E.g. + ## imagePullSecrets: + ## - myRegistryKeySecretName + ## + imagePullSecrets: [] + storageClass: "" + redis: + password: "" + +## @section Common parameters +## + +## @param kubeVersion Override Kubernetes version +## +kubeVersion: "" +## @param nameOverride String to partially override common.names.fullname +## +nameOverride: "" +## @param fullnameOverride String to fully override common.names.fullname +## +fullnameOverride: "" +## @param commonLabels Labels to add to all deployed objects +## +commonLabels: {} +## @param commonAnnotations Annotations to add to all deployed objects +## +commonAnnotations: {} +## @param secretAnnotations Annotations to add to secret +## +secretAnnotations: {} +## @param clusterDomain Kubernetes cluster domain name +## +clusterDomain: cluster.local +## @param extraDeploy Array of extra objects to deploy with the release +## +extraDeploy: [] + +## Enable diagnostic mode in the deployment +## +diagnosticMode: + ## @param diagnosticMode.enabled Enable diagnostic mode (all probes will be disabled and the command will be overridden) + ## + enabled: false + ## @param diagnosticMode.command Command to override all containers in the deployment + ## + command: + - sleep + ## @param diagnosticMode.args Args to override all containers in the deployment + ## + args: + - infinity + +## @section Redis® Image parameters +## + +## Bitnami Redis® image +## ref: https://hub.docker.com/r/bitnami/redis/tags/ +## @param image.registry Redis® image registry +## @param image.repository Redis® image repository +## @param image.tag Redis® image tag (immutable tags are recommended) +## @param image.digest Redis® image digest in the way sha256:aa.... 
Please note this parameter, if set, will override the tag +## @param image.pullPolicy Redis® image pull policy +## @param image.pullSecrets Redis® image pull secrets +## @param image.debug Enable image debug mode +## +image: + registry: docker.io + repository: bitnami/redis + tag: 7.0.5-debian-11-r15 + digest: "" + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: https://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## e.g: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## Enable debug mode + ## + debug: false + +## @section Redis® common configuration parameters +## https://github.com/bitnami/containers/tree/main/bitnami/redis#configuration +## + +## @param architecture Redis® architecture. Allowed values: `standalone` or `replication` +## +architecture: replication +## Redis® Authentication parameters +## ref: https://github.com/bitnami/containers/tree/main/bitnami/redis#setting-the-server-password-on-first-run +## +auth: + ## @param auth.enabled Enable password authentication + ## + enabled: true + ## @param auth.sentinel Enable password authentication on sentinels too + ## + sentinel: true + ## @param auth.password Redis® password + ## Defaults to a random 10-character alphanumeric string if not set + ## + password: "" + ## @param auth.existingSecret The name of an existing secret with Redis® credentials + ## NOTE: When it's set, the previous `auth.password` parameter is ignored + ## + existingSecret: "" + ## @param auth.existingSecretPasswordKey Password key to be retrieved from existing secret + ## NOTE: ignored unless `auth.existingSecret` parameter is set + ## + existingSecretPasswordKey: "" + ## @param auth.usePasswordFiles Mount credentials as files instead of using an environment variable + ## + usePasswordFiles: false + +## @param commonConfiguration [string] Common configuration to be added into the ConfigMap +## ref: https://redis.io/topics/config +## +commonConfiguration: |- + # Enable AOF https://redis.io/topics/persistence#append-only-file + appendonly yes + # Disable RDB persistence, AOF persistence already enabled. + save "" +## @param existingConfigmap The name of an existing ConfigMap with your custom configuration for Redis® nodes +## +existingConfigmap: "" + +## @section Redis® master configuration parameters +## + +master: + ## @param master.count Number of Redis® master instances to deploy (experimental, requires additional configuration) + ## + count: 1 + ## @param master.configuration Configuration for Redis® master nodes + ## ref: https://redis.io/topics/config + ## + configuration: "" + ## @param master.disableCommands Array with Redis® commands to disable on master nodes + ## Commands will be completely disabled by renaming each to an empty string. 
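+  ## NOTE (editorial, illustrative): "renaming each to an empty string" refers to the standard Redis
+  ## `rename-command` directive, so the default `master.disableCommands` entries below (FLUSHDB, FLUSHALL)
+  ## are roughly equivalent to adding the following to the Redis configuration:
+  ##   rename-command FLUSHDB ""
+  ##   rename-command FLUSHALL ""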
+ ## ref: https://redis.io/topics/security#disabling-of-specific-commands + ## + disableCommands: + - FLUSHDB + - FLUSHALL + ## @param master.command Override default container command (useful when using custom images) + ## + command: [] + ## @param master.args Override default container args (useful when using custom images) + ## + args: [] + ## @param master.preExecCmds Additional commands to run prior to starting Redis® master + ## + preExecCmds: [] + ## @param master.extraFlags Array with additional command line flags for Redis® master + ## e.g: + ## extraFlags: + ## - "--maxmemory-policy volatile-ttl" + ## - "--repl-backlog-size 1024mb" + ## + extraFlags: [] + ## @param master.extraEnvVars Array with extra environment variables to add to Redis® master nodes + ## e.g: + ## extraEnvVars: + ## - name: FOO + ## value: "bar" + ## + extraEnvVars: [] + ## @param master.extraEnvVarsCM Name of existing ConfigMap containing extra env vars for Redis® master nodes + ## + extraEnvVarsCM: "" + ## @param master.extraEnvVarsSecret Name of existing Secret containing extra env vars for Redis® master nodes + ## + extraEnvVarsSecret: "" + ## @param master.containerPorts.redis Container port to open on Redis® master nodes + ## + containerPorts: + redis: 6379 + ## Configure extra options for Redis® containers' liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes + ## @param master.startupProbe.enabled Enable startupProbe on Redis® master nodes + ## @param master.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe + ## @param master.startupProbe.periodSeconds Period seconds for startupProbe + ## @param master.startupProbe.timeoutSeconds Timeout seconds for startupProbe + ## @param master.startupProbe.failureThreshold Failure threshold for startupProbe + ## @param master.startupProbe.successThreshold Success threshold for startupProbe + ## + startupProbe: + enabled: false + initialDelaySeconds: 20 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + ## @param master.livenessProbe.enabled Enable livenessProbe on Redis® master nodes + ## @param master.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe + ## @param master.livenessProbe.periodSeconds Period seconds for livenessProbe + ## @param master.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe + ## @param master.livenessProbe.failureThreshold Failure threshold for livenessProbe + ## @param master.livenessProbe.successThreshold Success threshold for livenessProbe + ## + livenessProbe: + enabled: true + initialDelaySeconds: 20 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + ## @param master.readinessProbe.enabled Enable readinessProbe on Redis® master nodes + ## @param master.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe + ## @param master.readinessProbe.periodSeconds Period seconds for readinessProbe + ## @param master.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe + ## @param master.readinessProbe.failureThreshold Failure threshold for readinessProbe + ## @param master.readinessProbe.successThreshold Success threshold for readinessProbe + ## + readinessProbe: + enabled: true + initialDelaySeconds: 20 + periodSeconds: 5 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + ## @param master.customStartupProbe Custom startupProbe that overrides the default one + ## + 
customStartupProbe: {} + ## @param master.customLivenessProbe Custom livenessProbe that overrides the default one + ## + customLivenessProbe: {} + ## @param master.customReadinessProbe Custom readinessProbe that overrides the default one + ## + customReadinessProbe: {} + ## Redis® master resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## @param master.resources.limits The resources limits for the Redis® master containers + ## @param master.resources.requests The requested resources for the Redis® master containers + ## + resources: + limits: {} + requests: {} + ## Configure Pods Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod + ## @param master.podSecurityContext.enabled Enabled Redis® master pods' Security Context + ## @param master.podSecurityContext.fsGroup Set Redis® master pod's Security Context fsGroup + ## + podSecurityContext: + enabled: true + fsGroup: 1001 + ## Configure Container Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod + ## @param master.containerSecurityContext.enabled Enabled Redis® master containers' Security Context + ## @param master.containerSecurityContext.runAsUser Set Redis® master containers' Security Context runAsUser + ## + containerSecurityContext: + enabled: true + runAsUser: 1001 + ## @param master.kind Use either Deployment or StatefulSet (default) + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/ + ## + kind: StatefulSet + ## @param master.schedulerName Alternate scheduler for Redis® master pods + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + schedulerName: "" + ## @param master.updateStrategy.type Redis® master statefulset strategy type + ## @skip master.updateStrategy.rollingUpdate + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies + ## + updateStrategy: + ## StrategyType + ## Can be set to RollingUpdate or OnDelete + ## + type: RollingUpdate + rollingUpdate: {} + ## @param master.priorityClassName Redis® master pods' priorityClassName + ## + priorityClassName: "" + ## @param master.hostAliases Redis® master pods host aliases + ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ + ## + hostAliases: [] + ## @param master.podLabels Extra labels for Redis® master pods + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + ## + podLabels: {} + ## @param master.podAnnotations Annotations for Redis® master pods + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + ## + podAnnotations: {} + ## @param master.shareProcessNamespace Share a single process namespace between all of the containers in Redis® master pods + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/share-process-namespace/ + ## + shareProcessNamespace: false + ## @param master.podAffinityPreset Pod affinity preset. Ignored if `master.affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAffinityPreset: "" + ## @param master.podAntiAffinityPreset Pod anti-affinity preset. Ignored if `master.affinity` is set. 
Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAntiAffinityPreset: soft + ## Node master.affinity preset + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity + ## + nodeAffinityPreset: + ## @param master.nodeAffinityPreset.type Node affinity preset type. Ignored if `master.affinity` is set. Allowed values: `soft` or `hard` + ## + type: "" + ## @param master.nodeAffinityPreset.key Node label key to match. Ignored if `master.affinity` is set + ## + key: "" + ## @param master.nodeAffinityPreset.values Node label values to match. Ignored if `master.affinity` is set + ## E.g. + ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: [] + ## @param master.affinity Affinity for Redis® master pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## NOTE: `master.podAffinityPreset`, `master.podAntiAffinityPreset`, and `master.nodeAffinityPreset` will be ignored when it's set + ## + affinity: {} + ## @param master.nodeSelector Node labels for Redis® master pods assignment + ## ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + ## @param master.tolerations Tolerations for Redis® master pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + ## @param master.topologySpreadConstraints Spread Constraints for Redis® master pod assignment + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ + ## E.g. + ## topologySpreadConstraints: + ## - maxSkew: 1 + ## topologyKey: node + ## whenUnsatisfiable: DoNotSchedule + ## + topologySpreadConstraints: [] + ## @param master.dnsPolicy DNS Policy for Redis® master pod + ## ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/ + ## E.g. + ## dnsPolicy: ClusterFirst + dnsPolicy: "" + ## @param master.dnsConfig DNS Configuration for Redis® master pod + ## ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/ + ## E.g. 
+ ## dnsConfig: + ## options: + ## - name: ndots + ## value: "4" + ## - name: single-request-reopen + dnsConfig: {} + ## @param master.lifecycleHooks for the Redis® master container(s) to automate configuration before or after startup + ## + lifecycleHooks: {} + ## @param master.extraVolumes Optionally specify extra list of additional volumes for the Redis® master pod(s) + ## + extraVolumes: [] + ## @param master.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the Redis® master container(s) + ## + extraVolumeMounts: [] + ## @param master.sidecars Add additional sidecar containers to the Redis® master pod(s) + ## e.g: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + ## + sidecars: [] + ## @param master.initContainers Add additional init containers to the Redis® master pod(s) + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ + ## e.g: + ## initContainers: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## command: ['sh', '-c', 'echo "hello world"'] + ## + initContainers: [] + ## Persistence parameters + ## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + ## @param master.persistence.enabled Enable persistence on Redis® master nodes using Persistent Volume Claims + ## + enabled: true + ## @param master.persistence.medium Provide a medium for `emptyDir` volumes. + ## + medium: "" + ## @param master.persistence.sizeLimit Set this to enable a size limit for `emptyDir` volumes. + ## + sizeLimit: "" + ## @param master.persistence.path The path the volume will be mounted at on Redis® master containers + ## NOTE: Useful when using different Redis® images + ## + path: /data + ## @param master.persistence.subPath The subdirectory of the volume to mount on Redis® master containers + ## NOTE: Useful in dev environments + ## + subPath: "" + ## @param master.persistence.subPathExpr Used to construct the subPath subdirectory of the volume to mount on Redis® master containers + ## + subPathExpr: "" + ## @param master.persistence.storageClass Persistent Volume storage class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is set, choosing the default provisioner + ## + storageClass: "" + ## @param master.persistence.accessModes Persistent Volume access modes + ## + accessModes: + - ReadWriteOnce + ## @param master.persistence.size Persistent Volume size + ## + size: 8Gi + ## @param master.persistence.annotations Additional custom annotations for the PVC + ## + annotations: {} + ## @param master.persistence.selector Additional labels to match for the PVC + ## e.g: + ## selector: + ## matchLabels: + ## app: my-app + ## + selector: {} + ## @param master.persistence.dataSource Custom PVC data source + ## + dataSource: {} + ## @param master.persistence.existingClaim Use a existing PVC which must be created manually before bound + ## NOTE: requires master.persistence.enabled: true + ## + existingClaim: "" + ## Redis® master service parameters + ## + service: + ## @param master.service.type Redis® master service type + ## + type: ClusterIP + ## @param master.service.ports.redis Redis® master service port + ## + ports: + redis: 6379 + ## @param master.service.nodePorts.redis Node port for Redis® master + ## ref: 
https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## NOTE: choose port between <30000-32767> + ## + nodePorts: + redis: "" + ## @param master.service.externalTrafficPolicy Redis® master service external traffic policy + ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Cluster + ## @param master.service.extraPorts Extra ports to expose (normally used with the `sidecar` value) + ## + extraPorts: [] + ## @param master.service.internalTrafficPolicy Redis® master service internal traffic policy (requires Kubernetes v1.22 or greater to be usable) + ## ref: https://kubernetes.io/docs/concepts/services-networking/service-traffic-policy/ + ## + internalTrafficPolicy: Cluster + ## @param master.service.clusterIP Redis® master service Cluster IP + ## + clusterIP: "" + ## @param master.service.loadBalancerIP Redis® master service Load Balancer IP + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + loadBalancerIP: "" + ## @param master.service.loadBalancerSourceRanges Redis® master service Load Balancer sources + ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## e.g. + ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## @param master.service.annotations Additional custom annotations for Redis® master service + ## + annotations: {} + ## @param master.service.sessionAffinity Session Affinity for Kubernetes service, can be "None" or "ClientIP" + ## If "ClientIP", consecutive client requests will be directed to the same Pod + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + ## + sessionAffinity: None + ## @param master.service.sessionAffinityConfig Additional settings for the sessionAffinity + ## sessionAffinityConfig: + ## clientIP: + ## timeoutSeconds: 300 + ## + sessionAffinityConfig: {} + ## @param master.terminationGracePeriodSeconds Integer setting the termination grace period for the redis-master pods + ## + terminationGracePeriodSeconds: 30 + ## ServiceAccount configuration + ## + serviceAccount: + ## @param master.serviceAccount.create Specifies whether a ServiceAccount should be created + ## + create: false + ## @param master.serviceAccount.name The name of the ServiceAccount to use. + ## If not set and create is true, a name is generated using the common.names.fullname template + ## + name: "" + ## @param master.serviceAccount.automountServiceAccountToken Whether to auto mount the service account token + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#use-the-default-service-account-to-access-the-api-server + ## + automountServiceAccountToken: true + ## @param master.serviceAccount.annotations Additional custom annotations for the ServiceAccount + ## + annotations: {} + +## @section Redis® replicas configuration parameters +## + +replica: + ## @param replica.replicaCount Number of Redis® replicas to deploy + ## + replicaCount: 3 + ## @param replica.configuration Configuration for Redis® replicas nodes + ## ref: https://redis.io/topics/config + ## + configuration: "" + ## @param replica.disableCommands Array with Redis® commands to disable on replicas nodes + ## Commands will be completely disabled by renaming each to an empty string. 
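+  ## NOTE (editorial, illustrative): as with the master nodes, the `replica.disableCommands` list below can be
+  ## overridden at install time using Helm's list syntax for `--set`; a hypothetical example (release name is a
+  ## placeholder, chart path as added in this repository):
+  ##   helm install my-release ./charts/modules/redis --set replica.disableCommands="{FLUSHDB,FLUSHALL,KEYS}"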
+ ## ref: https://redis.io/topics/security#disabling-of-specific-commands + ## + disableCommands: + - FLUSHDB + - FLUSHALL + ## @param replica.command Override default container command (useful when using custom images) + ## + command: [] + ## @param replica.args Override default container args (useful when using custom images) + ## + args: [] + ## @param replica.preExecCmds Additional commands to run prior to starting Redis® replicas + ## + preExecCmds: [] + ## @param replica.extraFlags Array with additional command line flags for Redis® replicas + ## e.g: + ## extraFlags: + ## - "--maxmemory-policy volatile-ttl" + ## - "--repl-backlog-size 1024mb" + ## + extraFlags: [] + ## @param replica.extraEnvVars Array with extra environment variables to add to Redis® replicas nodes + ## e.g: + ## extraEnvVars: + ## - name: FOO + ## value: "bar" + ## + extraEnvVars: [] + ## @param replica.extraEnvVarsCM Name of existing ConfigMap containing extra env vars for Redis® replicas nodes + ## + extraEnvVarsCM: "" + ## @param replica.extraEnvVarsSecret Name of existing Secret containing extra env vars for Redis® replicas nodes + ## + extraEnvVarsSecret: "" + ## @param replica.externalMaster.enabled Use external master for bootstrapping + ## @param replica.externalMaster.host External master host to bootstrap from + ## @param replica.externalMaster.port Port for Redis service external master host + ## + externalMaster: + enabled: false + host: "" + port: 6379 + ## @param replica.containerPorts.redis Container port to open on Redis® replicas nodes + ## + containerPorts: + redis: 6379 + ## Configure extra options for Redis® containers' liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes + ## @param replica.startupProbe.enabled Enable startupProbe on Redis® replicas nodes + ## @param replica.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe + ## @param replica.startupProbe.periodSeconds Period seconds for startupProbe + ## @param replica.startupProbe.timeoutSeconds Timeout seconds for startupProbe + ## @param replica.startupProbe.failureThreshold Failure threshold for startupProbe + ## @param replica.startupProbe.successThreshold Success threshold for startupProbe + ## + startupProbe: + enabled: true + initialDelaySeconds: 10 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 22 + ## @param replica.livenessProbe.enabled Enable livenessProbe on Redis® replicas nodes + ## @param replica.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe + ## @param replica.livenessProbe.periodSeconds Period seconds for livenessProbe + ## @param replica.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe + ## @param replica.livenessProbe.failureThreshold Failure threshold for livenessProbe + ## @param replica.livenessProbe.successThreshold Success threshold for livenessProbe + ## + livenessProbe: + enabled: true + initialDelaySeconds: 20 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + ## @param replica.readinessProbe.enabled Enable readinessProbe on Redis® replicas nodes + ## @param replica.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe + ## @param replica.readinessProbe.periodSeconds Period seconds for readinessProbe + ## @param replica.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe + ## @param replica.readinessProbe.failureThreshold Failure threshold for 
readinessProbe + ## @param replica.readinessProbe.successThreshold Success threshold for readinessProbe + ## + readinessProbe: + enabled: true + initialDelaySeconds: 20 + periodSeconds: 5 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + ## @param replica.customStartupProbe Custom startupProbe that overrides the default one + ## + customStartupProbe: {} + ## @param replica.customLivenessProbe Custom livenessProbe that overrides the default one + ## + customLivenessProbe: {} + ## @param replica.customReadinessProbe Custom readinessProbe that overrides the default one + ## + customReadinessProbe: {} + ## Redis® replicas resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## @param replica.resources.limits The resources limits for the Redis® replicas containers + ## @param replica.resources.requests The requested resources for the Redis® replicas containers + ## + resources: + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + limits: {} + # cpu: 250m + # memory: 256Mi + requests: {} + # cpu: 250m + # memory: 256Mi + ## Configure Pods Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod + ## @param replica.podSecurityContext.enabled Enabled Redis® replicas pods' Security Context + ## @param replica.podSecurityContext.fsGroup Set Redis® replicas pod's Security Context fsGroup + ## + podSecurityContext: + enabled: true + fsGroup: 1001 + ## Configure Container Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod + ## @param replica.containerSecurityContext.enabled Enabled Redis® replicas containers' Security Context + ## @param replica.containerSecurityContext.runAsUser Set Redis® replicas containers' Security Context runAsUser + ## + containerSecurityContext: + enabled: true + runAsUser: 1001 + ## @param replica.schedulerName Alternate scheduler for Redis® replicas pods + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + schedulerName: "" + ## @param replica.updateStrategy.type Redis® replicas statefulset strategy type + ## @skip replica.updateStrategy.rollingUpdate + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies + ## + updateStrategy: + ## StrategyType + ## Can be set to RollingUpdate or OnDelete + ## + type: RollingUpdate + rollingUpdate: {} + ## @param replica.priorityClassName Redis® replicas pods' priorityClassName + ## + priorityClassName: "" + ## @param replica.podManagementPolicy podManagementPolicy to manage scaling operation of %%MAIN_CONTAINER_NAME%% pods + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#pod-management-policies + ## + podManagementPolicy: "" + ## @param replica.hostAliases Redis® replicas pods host aliases + ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ + ## + hostAliases: [] + ## @param replica.podLabels Extra labels for Redis® replicas pods + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + ## + podLabels: {} + 
## @param replica.podAnnotations Annotations for Redis® replicas pods + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + ## + podAnnotations: {} + ## @param replica.shareProcessNamespace Share a single process namespace between all of the containers in Redis® replicas pods + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/share-process-namespace/ + ## + shareProcessNamespace: false + ## @param replica.podAffinityPreset Pod affinity preset. Ignored if `replica.affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAffinityPreset: "" + ## @param replica.podAntiAffinityPreset Pod anti-affinity preset. Ignored if `replica.affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAntiAffinityPreset: soft + ## Node affinity preset + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity + ## + nodeAffinityPreset: + ## @param replica.nodeAffinityPreset.type Node affinity preset type. Ignored if `replica.affinity` is set. Allowed values: `soft` or `hard` + ## + type: "" + ## @param replica.nodeAffinityPreset.key Node label key to match. Ignored if `replica.affinity` is set + ## + key: "" + ## @param replica.nodeAffinityPreset.values Node label values to match. Ignored if `replica.affinity` is set + ## E.g. + ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: [] + ## @param replica.affinity Affinity for Redis® replicas pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## NOTE: `replica.podAffinityPreset`, `replica.podAntiAffinityPreset`, and `replica.nodeAffinityPreset` will be ignored when it's set + ## + affinity: {} + ## @param replica.nodeSelector Node labels for Redis® replicas pods assignment + ## ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + ## @param replica.tolerations Tolerations for Redis® replicas pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + ## @param replica.topologySpreadConstraints Spread Constraints for Redis® replicas pod assignment + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ + ## E.g. + ## topologySpreadConstraints: + ## - maxSkew: 1 + ## topologyKey: node + ## whenUnsatisfiable: DoNotSchedule + ## + topologySpreadConstraints: [] + ## @param replica.dnsPolicy DNS Policy for Redis® replica pods + ## ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/ + ## E.g. + ## dnsPolicy: ClusterFirst + dnsPolicy: "" + ## @param replica.dnsConfig DNS Configuration for Redis® replica pods + ## ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/ + ## E.g. 
+ ## dnsConfig: + ## options: + ## - name: ndots + ## value: "4" + ## - name: single-request-reopen + dnsConfig: {} + ## @param replica.lifecycleHooks for the Redis® replica container(s) to automate configuration before or after startup + ## + lifecycleHooks: {} + ## @param replica.extraVolumes Optionally specify extra list of additional volumes for the Redis® replicas pod(s) + ## + extraVolumes: [] + ## @param replica.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the Redis® replicas container(s) + ## + extraVolumeMounts: [] + ## @param replica.sidecars Add additional sidecar containers to the Redis® replicas pod(s) + ## e.g: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + ## + sidecars: [] + ## @param replica.initContainers Add additional init containers to the Redis® replicas pod(s) + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ + ## e.g: + ## initContainers: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## command: ['sh', '-c', 'echo "hello world"'] + ## + initContainers: [] + ## Persistence Parameters + ## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + ## @param replica.persistence.enabled Enable persistence on Redis® replicas nodes using Persistent Volume Claims + ## + enabled: true + ## @param replica.persistence.medium Provide a medium for `emptyDir` volumes. + ## + medium: "" + ## @param replica.persistence.sizeLimit Set this to enable a size limit for `emptyDir` volumes. + ## + sizeLimit: "" + ## @param replica.persistence.path The path the volume will be mounted at on Redis® replicas containers + ## NOTE: Useful when using different Redis® images + ## + path: /data + ## @param replica.persistence.subPath The subdirectory of the volume to mount on Redis® replicas containers + ## NOTE: Useful in dev environments + ## + subPath: "" + ## @param replica.persistence.subPathExpr Used to construct the subPath subdirectory of the volume to mount on Redis® replicas containers + ## + subPathExpr: "" + ## @param replica.persistence.storageClass Persistent Volume storage class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is set, choosing the default provisioner + ## + storageClass: "" + ## @param replica.persistence.accessModes Persistent Volume access modes + ## + accessModes: + - ReadWriteOnce + ## @param replica.persistence.size Persistent Volume size + ## + size: 8Gi + ## @param replica.persistence.annotations Additional custom annotations for the PVC + ## + annotations: {} + ## @param replica.persistence.selector Additional labels to match for the PVC + ## e.g: + ## selector: + ## matchLabels: + ## app: my-app + ## + selector: {} + ## @param replica.persistence.dataSource Custom PVC data source + ## + dataSource: {} + ## @param replica.persistence.existingClaim Use a existing PVC which must be created manually before bound + ## NOTE: requires replica.persistence.enabled: true + ## + existingClaim: "" + ## Redis® replicas service parameters + ## + service: + ## @param replica.service.type Redis® replicas service type + ## + type: ClusterIP + ## @param replica.service.ports.redis Redis® replicas service port + ## + ports: + redis: 6379 + ## @param replica.service.nodePorts.redis Node port for Redis® replicas 
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## NOTE: choose port between <30000-32767> + ## + nodePorts: + redis: "" + ## @param replica.service.externalTrafficPolicy Redis® replicas service external traffic policy + ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Cluster + ## @param replica.service.internalTrafficPolicy Redis® replicas service internal traffic policy (requires Kubernetes v1.22 or greater to be usable) + ## ref: https://kubernetes.io/docs/concepts/services-networking/service-traffic-policy/ + ## + internalTrafficPolicy: Cluster + ## @param replica.service.extraPorts Extra ports to expose (normally used with the `sidecar` value) + ## + extraPorts: [] + ## @param replica.service.clusterIP Redis® replicas service Cluster IP + ## + clusterIP: "" + ## @param replica.service.loadBalancerIP Redis® replicas service Load Balancer IP + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + loadBalancerIP: "" + ## @param replica.service.loadBalancerSourceRanges Redis® replicas service Load Balancer sources + ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## e.g. + ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## @param replica.service.annotations Additional custom annotations for Redis® replicas service + ## + annotations: {} + ## @param replica.service.sessionAffinity Session Affinity for Kubernetes service, can be "None" or "ClientIP" + ## If "ClientIP", consecutive client requests will be directed to the same Pod + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + ## + sessionAffinity: None + ## @param replica.service.sessionAffinityConfig Additional settings for the sessionAffinity + ## sessionAffinityConfig: + ## clientIP: + ## timeoutSeconds: 300 + ## + sessionAffinityConfig: {} + ## @param replica.terminationGracePeriodSeconds Integer setting the termination grace period for the redis-replicas pods + ## + terminationGracePeriodSeconds: 30 + ## Autoscaling configuration + ## + autoscaling: + ## @param replica.autoscaling.enabled Enable replica autoscaling settings + ## + enabled: false + ## @param replica.autoscaling.minReplicas Minimum replicas for the pod autoscaling + ## + minReplicas: 1 + ## @param replica.autoscaling.maxReplicas Maximum replicas for the pod autoscaling + ## + maxReplicas: 11 + ## @param replica.autoscaling.targetCPU Percentage of CPU to consider when autoscaling + ## + targetCPU: "" + ## @param replica.autoscaling.targetMemory Percentage of Memory to consider when autoscaling + ## + targetMemory: "" + ## ServiceAccount configuration + ## + serviceAccount: + ## @param replica.serviceAccount.create Specifies whether a ServiceAccount should be created + ## + create: false + ## @param replica.serviceAccount.name The name of the ServiceAccount to use. 
+ ## If not set and create is true, a name is generated using the common.names.fullname template + ## + name: "" + ## @param replica.serviceAccount.automountServiceAccountToken Whether to auto mount the service account token + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#use-the-default-service-account-to-access-the-api-server + ## + automountServiceAccountToken: true + ## @param replica.serviceAccount.annotations Additional custom annotations for the ServiceAccount + ## + annotations: {} +## @section Redis® Sentinel configuration parameters +## + +sentinel: + ## @param sentinel.enabled Use Redis® Sentinel on Redis® pods. + ## IMPORTANT: this will disable the master and replicas services and + ## create a single Redis® service exposing both the Redis and Sentinel ports + ## + enabled: false + ## Bitnami Redis® Sentinel image version + ## ref: https://hub.docker.com/r/bitnami/redis-sentinel/tags/ + ## @param sentinel.image.registry Redis® Sentinel image registry + ## @param sentinel.image.repository Redis® Sentinel image repository + ## @param sentinel.image.tag Redis® Sentinel image tag (immutable tags are recommended) + ## @param sentinel.image.digest Redis® Sentinel image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag + ## @param sentinel.image.pullPolicy Redis® Sentinel image pull policy + ## @param sentinel.image.pullSecrets Redis® Sentinel image pull secrets + ## @param sentinel.image.debug Enable image debug mode + ## + image: + registry: docker.io + repository: bitnami/redis-sentinel + tag: 7.0.5-debian-11-r14 + digest: "" + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: https://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## e.g: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## Enable debug mode + ## + debug: false + ## @param sentinel.masterSet Master set name + ## + masterSet: mymaster + ## @param sentinel.quorum Sentinel Quorum + ## + quorum: 2 + ## @param sentinel.getMasterTimeout Amount of time to allow before get_sentinel_master_info() times out. + ## NOTE: This is directly related to the startupProbes which are configured to run every 10 seconds for a total of 22 failures. If adjusting this value, also adjust the startupProbes. + getMasterTimeout: 220 + ## @param sentinel.automateClusterRecovery Automate cluster recovery in cases where the last replica is not considered a good replica and Sentinel won't automatically failover to it. + ## This also prevents any new replica from starting until the last remaining replica is elected as master to guarantee that it is the one to be elected by Sentinel, and not a newly started replica with no data. + ## NOTE: This feature requires a "downAfterMilliseconds" value less or equal to 2000. 
+ ## + automateClusterRecovery: false + ## Sentinel timing restrictions + ## @param sentinel.downAfterMilliseconds Timeout for detecting a Redis® node is down + ## @param sentinel.failoverTimeout Timeout for performing a election failover + ## + downAfterMilliseconds: 60000 + failoverTimeout: 180000 + ## @param sentinel.parallelSyncs Number of replicas that can be reconfigured in parallel to use the new master after a failover + ## + parallelSyncs: 1 + ## @param sentinel.configuration Configuration for Redis® Sentinel nodes + ## ref: https://redis.io/topics/sentinel + ## + configuration: "" + ## @param sentinel.command Override default container command (useful when using custom images) + ## + command: [] + ## @param sentinel.args Override default container args (useful when using custom images) + ## + args: [] + ## @param sentinel.preExecCmds Additional commands to run prior to starting Redis® Sentinel + ## + preExecCmds: [] + ## @param sentinel.extraEnvVars Array with extra environment variables to add to Redis® Sentinel nodes + ## e.g: + ## extraEnvVars: + ## - name: FOO + ## value: "bar" + ## + extraEnvVars: [] + ## @param sentinel.extraEnvVarsCM Name of existing ConfigMap containing extra env vars for Redis® Sentinel nodes + ## + extraEnvVarsCM: "" + ## @param sentinel.extraEnvVarsSecret Name of existing Secret containing extra env vars for Redis® Sentinel nodes + ## + extraEnvVarsSecret: "" + ## @param sentinel.externalMaster.enabled Use external master for bootstrapping + ## @param sentinel.externalMaster.host External master host to bootstrap from + ## @param sentinel.externalMaster.port Port for Redis service external master host + ## + externalMaster: + enabled: false + host: "" + port: 6379 + ## @param sentinel.containerPorts.sentinel Container port to open on Redis® Sentinel nodes + ## + containerPorts: + sentinel: 26379 + ## Configure extra options for Redis® containers' liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes + ## @param sentinel.startupProbe.enabled Enable startupProbe on Redis® Sentinel nodes + ## @param sentinel.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe + ## @param sentinel.startupProbe.periodSeconds Period seconds for startupProbe + ## @param sentinel.startupProbe.timeoutSeconds Timeout seconds for startupProbe + ## @param sentinel.startupProbe.failureThreshold Failure threshold for startupProbe + ## @param sentinel.startupProbe.successThreshold Success threshold for startupProbe + ## + startupProbe: + enabled: true + initialDelaySeconds: 10 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 22 + ## @param sentinel.livenessProbe.enabled Enable livenessProbe on Redis® Sentinel nodes + ## @param sentinel.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe + ## @param sentinel.livenessProbe.periodSeconds Period seconds for livenessProbe + ## @param sentinel.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe + ## @param sentinel.livenessProbe.failureThreshold Failure threshold for livenessProbe + ## @param sentinel.livenessProbe.successThreshold Success threshold for livenessProbe + ## + livenessProbe: + enabled: true + initialDelaySeconds: 20 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + ## @param sentinel.readinessProbe.enabled Enable readinessProbe on Redis® Sentinel nodes + ## @param sentinel.readinessProbe.initialDelaySeconds 
Initial delay seconds for readinessProbe + ## @param sentinel.readinessProbe.periodSeconds Period seconds for readinessProbe + ## @param sentinel.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe + ## @param sentinel.readinessProbe.failureThreshold Failure threshold for readinessProbe + ## @param sentinel.readinessProbe.successThreshold Success threshold for readinessProbe + ## + readinessProbe: + enabled: true + initialDelaySeconds: 20 + periodSeconds: 5 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + ## @param sentinel.customStartupProbe Custom startupProbe that overrides the default one + ## + customStartupProbe: {} + ## @param sentinel.customLivenessProbe Custom livenessProbe that overrides the default one + ## + customLivenessProbe: {} + ## @param sentinel.customReadinessProbe Custom readinessProbe that overrides the default one + ## + customReadinessProbe: {} + ## Persistence parameters + ## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + ## @param sentinel.persistence.enabled Enable persistence on Redis® sentinel nodes using Persistent Volume Claims (Experimental) + ## + enabled: false + ## @param sentinel.persistence.storageClass Persistent Volume storage class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is set, choosing the default provisioner + ## + storageClass: "" + ## @param sentinel.persistence.accessModes Persistent Volume access modes + ## + accessModes: + - ReadWriteOnce + ## @param sentinel.persistence.size Persistent Volume size + ## + size: 100Mi + ## @param sentinel.persistence.annotations Additional custom annotations for the PVC + ## + annotations: {} + ## @param sentinel.persistence.selector Additional labels to match for the PVC + ## e.g: + ## selector: + ## matchLabels: + ## app: my-app + ## + selector: {} + ## @param sentinel.persistence.dataSource Custom PVC data source + ## + dataSource: {} + ## @param sentinel.persistence.medium Provide a medium for `emptyDir` volumes. 
+ ## + medium: "" + ## Redis® Sentinel resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## @param sentinel.resources.limits The resources limits for the Redis® Sentinel containers + ## @param sentinel.resources.requests The requested resources for the Redis® Sentinel containers + ## + resources: + limits: {} + requests: {} + ## Configure Container Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod + ## @param sentinel.containerSecurityContext.enabled Enabled Redis® Sentinel containers' Security Context + ## @param sentinel.containerSecurityContext.runAsUser Set Redis® Sentinel containers' Security Context runAsUser + ## + containerSecurityContext: + enabled: true + runAsUser: 1001 + ## @param sentinel.lifecycleHooks for the Redis® sentinel container(s) to automate configuration before or after startup + ## + lifecycleHooks: {} + ## @param sentinel.extraVolumes Optionally specify extra list of additional volumes for the Redis® Sentinel + ## + extraVolumes: [] + ## @param sentinel.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the Redis® Sentinel container(s) + ## + extraVolumeMounts: [] + ## Redis® Sentinel service parameters + ## + service: + ## @param sentinel.service.type Redis® Sentinel service type + ## + type: ClusterIP + ## @param sentinel.service.ports.redis Redis® service port for Redis® + ## @param sentinel.service.ports.sentinel Redis® service port for Redis® Sentinel + ## + ports: + redis: 6379 + sentinel: 26379 + ## @param sentinel.service.nodePorts.redis Node port for Redis® + ## @param sentinel.service.nodePorts.sentinel Node port for Sentinel + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## NOTE: choose port between <30000-32767> + ## NOTE: By leaving these values blank, they will be generated by ports-configmap + ## If setting manually, please leave at least replica.replicaCount + 1 in between sentinel.service.nodePorts.redis and sentinel.service.nodePorts.sentinel to take into account the ports that will be created while incrementing that base port + ## + nodePorts: + redis: "" + sentinel: "" + ## @param sentinel.service.externalTrafficPolicy Redis® Sentinel service external traffic policy + ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Cluster + ## @param sentinel.service.extraPorts Extra ports to expose (normally used with the `sidecar` value) + ## + extraPorts: [] + ## @param sentinel.service.clusterIP Redis® Sentinel service Cluster IP + ## + clusterIP: "" + ## @param sentinel.service.loadBalancerIP Redis® Sentinel service Load Balancer IP + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + loadBalancerIP: "" + ## @param sentinel.service.loadBalancerSourceRanges Redis® Sentinel service Load Balancer sources + ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## e.g. 
+ ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## @param sentinel.service.annotations Additional custom annotations for Redis® Sentinel service + ## + annotations: {} + ## @param sentinel.service.sessionAffinity Session Affinity for Kubernetes service, can be "None" or "ClientIP" + ## If "ClientIP", consecutive client requests will be directed to the same Pod + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + ## + sessionAffinity: None + ## @param sentinel.service.sessionAffinityConfig Additional settings for the sessionAffinity + ## sessionAffinityConfig: + ## clientIP: + ## timeoutSeconds: 300 + ## + sessionAffinityConfig: {} + ## @param sentinel.terminationGracePeriodSeconds Integer setting the termination grace period for the redis-node pods + ## + terminationGracePeriodSeconds: 30 + +## @section Other Parameters +## + +## Network Policy configuration +## ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/ +## +networkPolicy: + ## @param networkPolicy.enabled Enable creation of NetworkPolicy resources + ## + enabled: false + ## @param networkPolicy.allowExternal Don't require client label for connections + ## When set to false, only pods with the correct client label will have network access to the ports + ## Redis® is listening on. When true, Redis® will accept connections from any source + ## (with the correct destination port). + ## + allowExternal: true + ## @param networkPolicy.extraIngress Add extra ingress rules to the NetworkPolicy + ## e.g: + ## extraIngress: + ## - ports: + ## - port: 1234 + ## from: + ## - podSelector: + ## - matchLabels: + ## - role: frontend + ## - podSelector: + ## - matchExpressions: + ## - key: role + ## operator: In + ## values: + ## - frontend + ## + extraIngress: [] + ## @param networkPolicy.extraEgress Add extra egress rules to the NetworkPolicy + ## e.g: + ## extraEgress: + ## - ports: + ## - port: 1234 + ## to: + ## - podSelector: + ## - matchLabels: + ## - role: frontend + ## - podSelector: + ## - matchExpressions: + ## - key: role + ## operator: In + ## values: + ## - frontend + ## + extraEgress: [] + ## @param networkPolicy.ingressNSMatchLabels Labels to match to allow traffic from other namespaces + ## @param networkPolicy.ingressNSPodMatchLabels Pod labels to match to allow traffic from other namespaces + ## + ingressNSMatchLabels: {} + ingressNSPodMatchLabels: {} +## PodSecurityPolicy configuration +## ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ +## +podSecurityPolicy: + ## @param podSecurityPolicy.create Whether to create a PodSecurityPolicy. WARNING: PodSecurityPolicy is deprecated in Kubernetes v1.21 or later, unavailable in v1.25 or later + ## + create: false + ## @param podSecurityPolicy.enabled Enable PodSecurityPolicy's RBAC rules + ## + enabled: false +## RBAC configuration +## +rbac: + ## @param rbac.create Specifies whether RBAC resources should be created + ## + create: false + ## @param rbac.rules Custom RBAC rules to set + ## e.g: + ## rules: + ## - apiGroups: + ## - "" + ## resources: + ## - pods + ## verbs: + ## - get + ## - list + ## + rules: [] +## ServiceAccount configuration +## +serviceAccount: + ## @param serviceAccount.create Specifies whether a ServiceAccount should be created + ## + create: true + ## @param serviceAccount.name The name of the ServiceAccount to use. 
+ ## If not set and create is true, a name is generated using the common.names.fullname template + ## + name: "" + ## @param serviceAccount.automountServiceAccountToken Whether to auto mount the service account token + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#use-the-default-service-account-to-access-the-api-server + ## + automountServiceAccountToken: true + ## @param serviceAccount.annotations Additional custom annotations for the ServiceAccount + ## + annotations: {} +## Redis® Pod Disruption Budget configuration +## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/ +## +pdb: + ## @param pdb.create Specifies whether a PodDisruptionBudget should be created + ## + create: false + ## @param pdb.minAvailable Min number of pods that must still be available after the eviction + ## + minAvailable: 1 + ## @param pdb.maxUnavailable Max number of pods that can be unavailable after the eviction + ## + maxUnavailable: "" +## TLS configuration +## +tls: + ## @param tls.enabled Enable TLS traffic + ## + enabled: false + ## @param tls.authClients Require clients to authenticate + ## + authClients: true + ## @param tls.autoGenerated Enable autogenerated certificates + ## + autoGenerated: false + ## @param tls.existingSecret The name of the existing secret that contains the TLS certificates + ## + existingSecret: "" + ## @param tls.certificatesSecret DEPRECATED. Use existingSecret instead. + ## + certificatesSecret: "" + ## @param tls.certFilename Certificate filename + ## + certFilename: "" + ## @param tls.certKeyFilename Certificate Key filename + ## + certKeyFilename: "" + ## @param tls.certCAFilename CA Certificate filename + ## + certCAFilename: "" + ## @param tls.dhParamsFilename File containing DH params (in order to support DH based ciphers) + ## + dhParamsFilename: "" + +## @section Metrics Parameters +## + +metrics: + ## @param metrics.enabled Start a sidecar prometheus exporter to expose Redis® metrics + ## + enabled: false + ## Bitnami Redis® Exporter image + ## ref: https://hub.docker.com/r/bitnami/redis-exporter/tags/ + ## @param metrics.image.registry Redis® Exporter image registry + ## @param metrics.image.repository Redis® Exporter image repository + ## @param metrics.image.tag Redis® Exporter image tag (immutable tags are recommended) + ## @param metrics.image.digest Redis® Exporter image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag + ## @param metrics.image.pullPolicy Redis® Exporter image pull policy + ## @param metrics.image.pullSecrets Redis® Exporter image pull secrets + ## + image: + registry: docker.io + repository: bitnami/redis-exporter + tag: 1.45.0-debian-11-r1 + digest: "" + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## e.g: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## @param metrics.command Override default metrics container init command (useful when using custom images) + ## + command: [] + ## @param metrics.redisTargetHost A way to specify an alternative Redis® hostname + ## Useful for certificate CN/SAN matching + ## + redisTargetHost: "localhost" + ## @param metrics.extraArgs Extra arguments for Redis® exporter, for example: + ## e.g.: + ## extraArgs: + ## check-keys: myKey,myOtherKey + ## + extraArgs: {} + ## @param metrics.extraEnvVars Array with extra environment variables to add to Redis® exporter + ## e.g: + ## extraEnvVars: + ## - name: FOO + ## value: "bar" + ## + extraEnvVars: [] + ## Configure Container Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod + ## @param metrics.containerSecurityContext.enabled Enabled Redis® exporter containers' Security Context + ## @param metrics.containerSecurityContext.runAsUser Set Redis® exporter containers' Security Context runAsUser + ## + containerSecurityContext: + enabled: true + runAsUser: 1001 + ## @param metrics.extraVolumes Optionally specify extra list of additional volumes for the Redis® metrics sidecar + ## + extraVolumes: [] + ## @param metrics.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the Redis® metrics sidecar + ## + extraVolumeMounts: [] + ## Redis® exporter resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## @param metrics.resources.limits The resources limits for the Redis® exporter container + ## @param metrics.resources.requests The requested resources for the Redis® exporter container + ## + resources: + limits: {} + requests: {} + ## @param metrics.podLabels Extra labels for Redis® exporter pods + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + ## + podLabels: {} + ## @param metrics.podAnnotations [object] Annotations for Redis® exporter pods + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + ## + podAnnotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9121" + ## Redis® exporter service parameters + ## + service: + ## @param metrics.service.type Redis® exporter service type + ## + type: ClusterIP + ## @param metrics.service.port Redis® exporter service port + ## + port: 9121 + ## @param metrics.service.externalTrafficPolicy Redis® exporter service external traffic policy + ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Cluster + ## @param metrics.service.extraPorts Extra ports to expose (normally used with the `sidecar` value) + ## + extraPorts: [] + ## @param metrics.service.loadBalancerIP Redis® exporter service Load Balancer IP + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + loadBalancerIP: "" + ## @param metrics.service.loadBalancerSourceRanges Redis® exporter service Load Balancer sources + ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## e.g. 
+ ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## @param metrics.service.annotations Additional custom annotations for Redis® exporter service + ## + annotations: {} + ## Prometheus Service Monitor + ## ref: https://github.com/coreos/prometheus-operator + ## https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint + ## + serviceMonitor: + ## @param metrics.serviceMonitor.enabled Create ServiceMonitor resource(s) for scraping metrics using PrometheusOperator + ## + enabled: false + ## @param metrics.serviceMonitor.namespace The namespace in which the ServiceMonitor will be created + ## + namespace: "" + ## @param metrics.serviceMonitor.interval The interval at which metrics should be scraped + ## + interval: 30s + ## @param metrics.serviceMonitor.scrapeTimeout The timeout after which the scrape is ended + ## + scrapeTimeout: "" + ## @param metrics.serviceMonitor.relabellings Metrics RelabelConfigs to apply to samples before scraping. + ## + relabellings: [] + ## @param metrics.serviceMonitor.metricRelabelings Metrics RelabelConfigs to apply to samples before ingestion. + ## + metricRelabelings: [] + ## @param metrics.serviceMonitor.honorLabels Specify honorLabels parameter to add the scrape endpoint + ## + honorLabels: false + ## @param metrics.serviceMonitor.additionalLabels Additional labels that can be used so ServiceMonitor resource(s) can be discovered by Prometheus + ## + additionalLabels: {} + ## @param metrics.serviceMonitor.podTargetLabels Labels from the Kubernetes pod to be transferred to the created metrics + ## + podTargetLabels: [] + ## Custom PrometheusRule to be defined + ## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions + ## + prometheusRule: + ## @param metrics.prometheusRule.enabled Create a custom prometheusRule Resource for scraping metrics using PrometheusOperator + ## + enabled: false + ## @param metrics.prometheusRule.namespace The namespace in which the prometheusRule will be created + ## + namespace: "" + ## @param metrics.prometheusRule.additionalLabels Additional labels for the prometheusRule + ## + additionalLabels: {} + ## @param metrics.prometheusRule.rules Custom Prometheus rules + ## e.g: + ## rules: + ## - alert: RedisDown + ## expr: redis_up{service="{{ template "common.names.fullname" . }}-metrics"} == 0 + ## for: 2m + ## labels: + ## severity: error + ## annotations: + ## summary: Redis® instance {{ "{{ $labels.instance }}" }} down + ## description: Redis® instance {{ "{{ $labels.instance }}" }} is down + ## - alert: RedisMemoryHigh + ## expr: > + ## redis_memory_used_bytes{service="{{ template "common.names.fullname" . }}-metrics"} * 100 + ## / + ## redis_memory_max_bytes{service="{{ template "common.names.fullname" . }}-metrics"} + ## > 90 + ## for: 2m + ## labels: + ## severity: error + ## annotations: + ## summary: Redis® instance {{ "{{ $labels.instance }}" }} is using too much memory + ## description: | + ## Redis® instance {{ "{{ $labels.instance }}" }} is using {{ "{{ $value }}" }}% of its available memory. + ## - alert: RedisKeyEviction + ## expr: | + ## increase(redis_evicted_keys_total{service="{{ template "common.names.fullname" . }}-metrics"}[5m]) > 0 + ## for: 1s + ## labels: + ## severity: error + ## annotations: + ## summary: Redis® instance {{ "{{ $labels.instance }}" }} has evicted keys + ## description: | + ## Redis® instance {{ "{{ $labels.instance }}" }} has evicted {{ "{{ $value }}" }} keys in the last 5 minutes. 
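+  ## An additional rule of the same shape (illustrative only; it assumes the `redis_connected_slaves` metric exposed by the bundled exporter): + ## - alert: RedisMissingReplicas + ##   expr: | + ##     redis_connected_slaves{service="{{ template "common.names.fullname" . }}-metrics"} < 1 + ##   for: 5m + ##   labels: + ##     severity: warning + ##   annotations: + ##     summary: Redis&reg; instance {{ "{{ $labels.instance }}" }} has no connected replicas + ##     description: | + ##       Redis&reg; instance {{ "{{ $labels.instance }}" }} has {{ "{{ $value }}" }} connected replicas.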
+ ## + rules: [] + +## @section Init Container Parameters +## + +## 'volumePermissions' init container parameters +## Changes the owner and group of the persistent volume mount point to runAsUser:fsGroup values +## based on the *podSecurityContext/*containerSecurityContext parameters +## +volumePermissions: + ## @param volumePermissions.enabled Enable init container that changes the owner/group of the PV mount point to `runAsUser:fsGroup` + ## + enabled: false + ## Bitnami Shell image + ## ref: https://hub.docker.com/r/bitnami/bitnami-shell/tags/ + ## @param volumePermissions.image.registry Bitnami Shell image registry + ## @param volumePermissions.image.repository Bitnami Shell image repository + ## @param volumePermissions.image.tag Bitnami Shell image tag (immutable tags are recommended) + ## @param volumePermissions.image.digest Bitnami Shell image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag + ## @param volumePermissions.image.pullPolicy Bitnami Shell image pull policy + ## @param volumePermissions.image.pullSecrets Bitnami Shell image pull secrets + ## + image: + registry: docker.io + repository: bitnami/bitnami-shell + tag: 11-debian-11-r48 + digest: "" + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## e.g: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## Init container's resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## @param volumePermissions.resources.limits The resources limits for the init container + ## @param volumePermissions.resources.requests The requested resources for the init container + ## + resources: + limits: {} + requests: {} + ## Init container Container Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container + ## @param volumePermissions.containerSecurityContext.runAsUser Set init container's Security Context runAsUser + ## NOTE: when runAsUser is set to special value "auto", init container will try to chown the + ## data folder to auto-determined user&group, using commands: `id -u`:`id -G | cut -d" " -f2` + ## "auto" is especially useful for OpenShift which has scc with dynamic user ids (and 0 is not allowed) + ## + containerSecurityContext: + runAsUser: 0 + +## init-sysctl container parameters +## used to perform sysctl operation to modify Kernel settings (needed sometimes to avoid warnings) +## +sysctl: + ## @param sysctl.enabled Enable init container to modify Kernel settings + ## + enabled: false + ## Bitnami Shell image + ## ref: https://hub.docker.com/r/bitnami/bitnami-shell/tags/ + ## @param sysctl.image.registry Bitnami Shell image registry + ## @param sysctl.image.repository Bitnami Shell image repository + ## @param sysctl.image.tag Bitnami Shell image tag (immutable tags are recommended) + ## @param sysctl.image.digest Bitnami Shell image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag + ## @param sysctl.image.pullPolicy Bitnami Shell image pull policy + ## @param sysctl.image.pullSecrets Bitnami Shell image pull secrets + ## + image: + registry: docker.io + repository: bitnami/bitnami-shell + tag: 11-debian-11-r48 + digest: "" + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. 
+ ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## e.g: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## @param sysctl.command Override default init-sysctl container command (useful when using custom images) + ## + command: [] + ## @param sysctl.mountHostSys Mount the host `/sys` folder to `/host-sys` + ## + mountHostSys: false + ## Init container's resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## @param sysctl.resources.limits The resources limits for the init container + ## @param sysctl.resources.requests The requested resources for the init container + ## + resources: + limits: {} + requests: {} + +## @section useExternalDNS Parameters +## +## @param useExternalDNS.enabled Enable various syntax that would enable external-dns to work. Note this requires a working installation of `external-dns` to be usable. +## @param useExternalDNS.additionalAnnotations Extra annotations to be utilized when `external-dns` is enabled. +## @param useExternalDNS.annotationKey The annotation key utilized when `external-dns` is enabled. Setting this to `false` will disable annotations. +## @param useExternalDNS.suffix The DNS suffix utilized when `external-dns` is enabled. Note that we prepend the suffix with the full name of the release. +## +useExternalDNS: + enabled: false + suffix: "" + annotationKey: external-dns.alpha.kubernetes.io/ + additionalAnnotations: {} diff --git a/charts/modules/sentry/REAMD.md b/charts/modules/sentry/REAMD.md new file mode 100644 index 0000000..787351e --- /dev/null +++ b/charts/modules/sentry/REAMD.md @@ -0,0 +1,54 @@ +### Sentry installation guide +[Reference documentation](https://artifacthub.io/packages/helm/sentry/sentry) +[Usage with Terraform + AWS](https://artifacthub.io/packages/helm/aws/aws-load-balancer-controller) + +#### Prerequisites +1. helm repo add sentry https://sentry-kubernetes.github.io/charts +2. The EKS ALB network component (aws-load-balancer-controller): +``` +# Install the controller +kubectl apply -k "github.com/aws/eks-charts/stable/aws-load-balancer-controller//crds?ref=master" + +helm repo add eks https://aws.github.io/eks-charts + +helm install aws-load-balancer-controller eks/aws-load-balancer-controller \ +--set clusterName=$CLUSTER \ +--set serviceAccount.create=false \ +--set region=$REGION \ +--set vpcId=$VPCID \ +--set serviceAccount.name=aws-load-balancer-controller \ +--set image.repository=918309763551.dkr.ecr.cn-north-1.amazonaws.com.cn/amazon/aws-load-balancer-controller \ +-n kube-system + + +helm --kubeconfig ~/.kube_eks/config install aws-load-balancer-controller eks/aws-load-balancer-controller \ +--set clusterName=tf-sentry \ +--set serviceAccount.create=false \ +--set region=cn-northwest-1 \ +--set image.repository=918309763551.dkr.ecr.cn-north-1.amazonaws.com.cn/amazon/aws-load-balancer-controller \ +-n kube-system + +``` + +#### Configuration file +values.yaml + +#### Test (dry run) +helm install --dry-run --debug sentry sentry/sentry -f values.yaml -n sentry + +#### Install +helm install --debug sentry sentry/sentry -f values.yaml -n sentry + +#### Update the configuration +helm upgrade --debug sentry sentry/sentry -f values.yaml -n sentry + +#### Check the installation +kubectl get pods -n sentry + + +#### Manually create a user (optional) +``` +Log in to the sentry-web pod and create a superuser with the following commands: +kubectl exec -it sentry-web-xxxx -n sentry -- bash +sentry createuser +``` \ No newline at end of file diff --git a/charts/modules/sentry/values.yaml b/charts/modules/sentry/values.yaml new file mode 100644 index 0000000..ff1b7b6 --- /dev/null +++ b/charts/modules/sentry/values.yaml @@ -0,0 +1,3123 @@ +asHook: true +auth: + register: true +clickhouse: + clickhouse: + configmap: + builtin_dictionaries_reload_interval: "3600" + compression: + cases: + - method: zstd + min_part_size: "10000000000" + min_part_size_ratio: "0.01" + enabled: false + default_session_timeout: "60" + disable_internal_dns_cache: "1" + enabled: true + graphite: + config: + - asynchronous_metrics: true + events: true + events_cumulative: true + interval: "60" + metrics: true + root_path: one_min + timeout: "0.1" + enabled: false + keep_alive_timeout: "3" + logger: + count: "10" + level: trace + path: /var/log/clickhouse-server + size: 1000M + stdoutLogsEnabled: false + mark_cache_size: "5368709120" + max_concurrent_queries: "100" + max_connections: "4096" + max_session_timeout: "3600" + mlock_executable: false + profiles: + enabled: false + profile: + - config: + load_balancing: random + max_memory_usage: "10000000000" + use_uncompressed_cache: "0" + name: default + quotas: + enabled: false + quota: + - config: + - duration: "3600" + errors: "0" + execution_time: "0" + queries: "0" + read_rows: "0" + result_rows: "0" + name: default + remote_servers: + enabled: true + internal_replication: true + replica: + backup: + enabled: false + compression: true + user: default + umask: "022" + uncompressed_cache_size: "8589934592" + users: + enabled: false + user: + - config: + networks: + - ::/0 + password: "" + profile: default + quota: default + name: default + zookeeper_servers: + config: + - hostTemplate: '{{ .Release.Name }}-zookeeper-clickhouse' + index: clickhouse + port: "2181" + enabled: true + operation_timeout_ms: "10000" + session_timeout_ms: "30000" + http_port: "8123" + image: yandex/clickhouse-server + imagePullPolicy: IfNotPresent + imageVersion: 20.8.19.4 + ingress: + enabled: false + init: + image: busybox + imagePullPolicy: IfNotPresent + imageVersion: 1.31.0 + resources: {} + interserver_http_port: "9009" + livenessProbe: + enabled: true +
failureThreshold: "3" + initialDelaySeconds: "30" + periodSeconds: "30" + successThreshold: "1" + timeoutSeconds: "5" + metrics: + enabled: false + image: + port: 9116 + pullPolicy: IfNotPresent + registry: docker.io + repository: f1yegor/clickhouse-exporter + tag: latest + podAnnotations: + prometheus.io/port: "9116" + prometheus.io/scrape: "true" + podLabels: {} + prometheusRule: + additionalLabels: {} + enabled: false + namespace: "" + rules: [] + service: + annotations: {} + labels: {} + type: ClusterIP + serviceMonitor: + enabled: false + selector: + prometheus: kube-prometheus + path: /var/lib/clickhouse + persistentVolumeClaim: + dataPersistentVolume: + accessModes: + - ReadWriteOnce + enabled: true + storage: 30Gi + enabled: true + logsPersistentVolume: + accessModes: + - ReadWriteOnce + enabled: false + storage: 50Gi + podManagementPolicy: Parallel + podSecurityContext: {} + readinessProbe: + enabled: true + failureThreshold: "3" + initialDelaySeconds: "30" + periodSeconds: "30" + successThreshold: "1" + timeoutSeconds: "5" + replicas: "3" + resources: {} + securityContext: {} + tcp_port: "9000" + updateStrategy: RollingUpdate + clusterDomain: cluster.local + enabled: true + global: {} + serviceAccount: + annotations: {} + automountServiceAccountToken: true + enabled: false + name: clickhouse + tabix: + enabled: false + image: spoonest/clickhouse-tabix-web-client + imagePullPolicy: IfNotPresent + imageVersion: stable + ingress: + enabled: false + livenessProbe: + enabled: true + failureThreshold: "3" + initialDelaySeconds: "30" + periodSeconds: "30" + successThreshold: "1" + timeoutSeconds: "5" + podAnnotations: null + podLabels: null + readinessProbe: + enabled: true + failureThreshold: "3" + initialDelaySeconds: "30" + periodSeconds: "30" + successThreshold: "1" + timeoutSeconds: "5" + replicas: "1" + resources: {} + security: + password: admin + user: admin + updateStrategy: + maxSurge: 3 + maxUnavailable: 1 + type: RollingUpdate + timezone: UTC +config: + configYml: {} + relay: | + # No YAML relay config given + sentryConfPy: | + # No Python Extension Config Given + snubaSettingsPy: | + # No Python Extension Config Given +externalClickhouse: + database: default + host: clickhouse + httpPort: 8123 + password: "" + singleNode: true + tcpPort: 9000 + username: default +externalKafka: + port: 9092 +externalPostgresql: + database: sentry + port: 5432 + username: postgres +externalRedis: + port: 6379 +filestore: + backend: filesystem + filesystem: + path: /var/lib/sentry/files + persistence: + accessMode: ReadWriteOnce + enabled: true + existingClaim: "" + persistentWorkers: false + size: 160Gi + gcs: {} + s3: {} +geodata: + mountPath: "" + path: "" + volumeName: "" +github: {} +google: {} +hooks: + activeDeadlineSeconds: 100 + dbCheck: + affinity: {} + env: [] + image: + imagePullSecrets: [] + nodeSelector: {} + podAnnotations: {} + resources: + limits: + memory: 64Mi + requests: + cpu: 100m + memory: 64Mi + securityContext: {} + dbInit: + affinity: {} + env: [] + nodeSelector: {} + podAnnotations: {} + resources: + limits: + memory: 2048Mi + requests: + cpu: 300m + memory: 2048Mi + sidecars: [] + volumes: [] + enabled: true + removeOnSuccess: true + shareProcessNamespace: false + snubaInit: + affinity: {} + nodeSelector: {} + podAnnotations: {} + resources: + limits: + cpu: 2000m + memory: 1Gi + requests: + cpu: 700m + memory: 1Gi + snubaMigrate: {} +images: + relay: + imagePullSecrets: [] + sentry: + imagePullSecrets: [] + snuba: + imagePullSecrets: [] + symbolicator: + 
imagePullSecrets: [] + tag: 0.5.1 +ingress: + enabled: true + regexPathStyle: aws-alb + alb: + httpRedirect: true + path: "/*" + annotations: + kubernetes.io/ingress.class: alb + alb.ingress.kubernetes.io/scheme: internet-facing + alb.ingress.kubernetes.io/target-type: ip +kafka: + advertisedListeners: [] + affinity: {} + allowEveryoneIfNoAclFound: true + allowPlaintextListener: true + args: [] + auth: + clientProtocol: plaintext + externalClientProtocol: "" + interBrokerProtocol: plaintext + sasl: + interBrokerMechanism: plain + jaas: + clientPasswords: [] + clientUsers: + - user + existingSecret: "" + interBrokerPassword: "" + interBrokerUser: admin + zookeeperPassword: "" + zookeeperUser: "" + mechanisms: plain,scram-sha-256,scram-sha-512 + tls: + autoGenerated: false + endpointIdentificationAlgorithm: https + existingSecret: "" + existingSecrets: [] + jksKeystoreSAN: "" + jksTruststore: "" + jksTruststoreSecret: "" + password: "" + pemChainIncluded: false + type: jks + zookeeper: + tls: + enabled: false + existingSecret: "" + existingSecretKeystoreKey: zookeeper.keystore.jks + existingSecretTruststoreKey: zookeeper.truststore.jks + passwordsSecret: "" + passwordsSecretKeystoreKey: keystore-password + passwordsSecretTruststoreKey: truststore-password + type: jks + verifyHostname: true + authorizerClassName: "" + autoCreateTopicsEnable: true + clusterDomain: cluster.local + command: + - /scripts/setup.sh + common: + exampleValue: common-chart + global: + imagePullSecrets: [] + imageRegistry: "" + storageClass: "" + commonAnnotations: {} + commonLabels: {} + config: "" + containerPorts: + client: 9092 + external: 9094 + internal: 9093 + containerSecurityContext: + enabled: true + runAsNonRoot: true + runAsUser: 1001 + customLivenessProbe: {} + customReadinessProbe: {} + customStartupProbe: {} + defaultReplicationFactor: 3 + deleteTopicEnable: false + diagnosticMode: + args: + - infinity + command: + - sleep + enabled: false + enabled: true + existingConfigmap: "" + existingLog4jConfigMap: "" + externalAccess: + autoDiscovery: + enabled: false + image: + pullPolicy: IfNotPresent + pullSecrets: [] + registry: docker.io + repository: bitnami/kubectl + tag: 1.24.0-debian-10-r2 + resources: + limits: {} + requests: {} + enabled: false + service: + annotations: {} + domain: "" + extraPorts: [] + loadBalancerAnnotations: [] + loadBalancerIPs: [] + loadBalancerNames: [] + loadBalancerSourceRanges: [] + nodePorts: [] + ports: + external: 9094 + type: LoadBalancer + useHostIPs: false + usePodIPs: false + externalZookeeper: + servers: [] + extraDeploy: [] + extraEnvVars: [] + extraEnvVarsCM: "" + extraEnvVarsSecret: "" + extraVolumeMounts: [] + extraVolumes: [] + fullnameOverride: "" + global: + imagePullSecrets: [] + imageRegistry: "" + storageClass: "" + heapOpts: -Xmx1024m -Xms1024m + hostAliases: [] + hostIPC: false + hostNetwork: false + image: + debug: false + pullPolicy: IfNotPresent + pullSecrets: [] + registry: docker.io + repository: bitnami/kafka + tag: 3.1.1-debian-10-r6 + initContainers: [] + interBrokerListenerName: INTERNAL + kubeVersion: "" + lifecycleHooks: {} + listenerSecurityProtocolMap: "" + listeners: [] + livenessProbe: + enabled: true + failureThreshold: 3 + initialDelaySeconds: 10 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 5 + log4j: "" + logFlushIntervalMessages: _10000 + logFlushIntervalMs: 1000 + logPersistence: + accessModes: + - ReadWriteOnce + annotations: {} + enabled: false + existingClaim: "" + mountPath: /opt/bitnami/kafka/logs + selector: {} + 
size: 8Gi + storageClass: "" + logRetentionBytes: _1073741824 + logRetentionCheckIntervalMs: 300000 + logRetentionHours: 168 + logSegmentBytes: _1073741824 + logsDirs: /bitnami/kafka/data + maxMessageBytes: "50000000" + metrics: + jmx: + config: |- + jmxUrl: service:jmx:rmi:///jndi/rmi://127.0.0.1:5555/jmxrmi + lowercaseOutputName: true + lowercaseOutputLabelNames: true + ssl: false + {{- if .Values.metrics.jmx.whitelistObjectNames }} + whitelistObjectNames: ["{{ join "\",\"" .Values.metrics.jmx.whitelistObjectNames }}"] + {{- end }} + containerPorts: + metrics: 5556 + containerSecurityContext: + enabled: true + runAsNonRoot: true + runAsUser: 1001 + enabled: false + existingConfigmap: "" + image: + pullPolicy: IfNotPresent + pullSecrets: [] + registry: docker.io + repository: bitnami/jmx-exporter + tag: 0.16.1-debian-10-r303 + resources: + limits: {} + requests: {} + service: + annotations: + prometheus.io/path: / + prometheus.io/port: '{{ .Values.metrics.jmx.service.ports.metrics }}' + prometheus.io/scrape: "true" + clusterIP: "" + ports: + metrics: 5556 + sessionAffinity: None + whitelistObjectNames: + - kafka.controller:* + - kafka.server:* + - java.lang:* + - kafka.network:* + - kafka.log:* + kafka: + affinity: {} + args: [] + certificatesSecret: "" + command: [] + containerPorts: + metrics: 9308 + containerSecurityContext: + enabled: true + runAsNonRoot: true + runAsUser: 1001 + enabled: false + extraFlags: {} + extraVolumeMounts: [] + extraVolumes: [] + hostAliases: [] + image: + pullPolicy: IfNotPresent + pullSecrets: [] + registry: docker.io + repository: bitnami/kafka-exporter + tag: 1.4.2-debian-10-r240 + initContainers: [] + nodeAffinityPreset: + key: "" + type: "" + values: [] + nodeSelector: {} + podAffinityPreset: "" + podAnnotations: {} + podAntiAffinityPreset: soft + podLabels: {} + podSecurityContext: + enabled: true + fsGroup: 1001 + resources: + limits: {} + requests: {} + schedulerName: "" + service: + annotations: + prometheus.io/path: /metrics + prometheus.io/port: '{{ .Values.metrics.kafka.service.ports.metrics }}' + prometheus.io/scrape: "true" + clusterIP: "" + ports: + metrics: 9308 + sessionAffinity: None + serviceAccount: + automountServiceAccountToken: true + create: true + name: "" + sidecars: [] + tlsCaCert: ca-file + tlsCaSecret: "" + tlsCert: cert-file + tlsKey: key-file + tolerations: [] + serviceMonitor: + enabled: false + honorLabels: false + interval: "" + jobLabel: "" + labels: {} + metricRelabelings: [] + namespace: "" + relabelings: [] + scrapeTimeout: "" + selector: {} + minBrokerId: 0 + nameOverride: "" + networkPolicy: + allowExternal: true + egressRules: + customRules: [] + enabled: false + explicitNamespacesSelector: {} + externalAccess: + from: [] + nodeAffinityPreset: + key: "" + type: "" + values: [] + nodeSelector: {} + numIoThreads: 8 + numNetworkThreads: 3 + numPartitions: 1 + numRecoveryThreadsPerDataDir: 1 + offsetsTopicReplicationFactor: 3 + pdb: + create: false + maxUnavailable: 1 + minAvailable: "" + persistence: + accessModes: + - ReadWriteOnce + annotations: {} + enabled: true + existingClaim: "" + mountPath: /bitnami/kafka + selector: {} + size: 8Gi + storageClass: "" + podAffinityPreset: "" + podAnnotations: {} + podAntiAffinityPreset: soft + podLabels: {} + podManagementPolicy: Parallel + podSecurityContext: + enabled: true + fsGroup: 1001 + priorityClassName: "" + provisioning: + args: [] + auth: + tls: + caCert: ca.crt + cert: tls.crt + certificatesSecret: "" + key: tls.key + keyPassword: "" + keyPasswordSecretKey: 
key-password + keystore: keystore.jks + keystorePassword: "" + keystorePasswordSecretKey: keystore-password + passwordsSecret: "" + truststore: truststore.jks + truststorePassword: "" + truststorePasswordSecretKey: truststore-password + type: jks + command: [] + containerSecurityContext: + enabled: true + runAsNonRoot: true + runAsUser: 1001 + enabled: false + extraEnvVars: [] + extraEnvVarsCM: "" + extraEnvVarsSecret: "" + extraProvisioningCommands: [] + extraVolumeMounts: [] + extraVolumes: [] + initContainers: [] + numPartitions: 1 + parallel: 1 + podAnnotations: {} + podLabels: {} + podSecurityContext: + enabled: true + fsGroup: 1001 + postScript: "" + preScript: "" + replicationFactor: 1 + resources: + limits: {} + requests: {} + schedulerName: "" + sidecars: [] + topics: [] + waitForKafka: true + rbac: + create: false + readinessProbe: + enabled: true + failureThreshold: 6 + initialDelaySeconds: 5 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 5 + replicaCount: 3 + resources: + limits: {} + requests: {} + schedulerName: "" + service: + annotations: {} + clusterIP: "" + externalTrafficPolicy: Cluster + extraPorts: [] + loadBalancerIP: "" + loadBalancerSourceRanges: [] + nodePorts: + client: "" + external: "" + ports: + client: 9092 + external: 9094 + internal: 9093 + sessionAffinity: None + type: ClusterIP + serviceAccount: + annotations: {} + automountServiceAccountToken: true + create: true + name: "" + sidecars: [] + socketReceiveBufferBytes: 102400 + socketRequestMaxBytes: "50000000" + socketSendBufferBytes: 102400 + startupProbe: + enabled: false + failureThreshold: 15 + initialDelaySeconds: 30 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + superUsers: User:admin + terminationGracePeriodSeconds: "" + tolerations: [] + topologySpreadConstraints: {} + transactionStateLogMinIsr: 3 + transactionStateLogReplicationFactor: 3 + updateStrategy: + rollingUpdate: {} + type: RollingUpdate + volumePermissions: + containerSecurityContext: + runAsUser: 0 + enabled: false + image: + pullPolicy: IfNotPresent + pullSecrets: [] + registry: docker.io + repository: bitnami/bitnami-shell + tag: 10-debian-10-r431 + resources: + limits: {} + requests: {} + zookeeper: + affinity: {} + args: [] + auth: + clientPassword: "" + clientUser: "" + enabled: false + existingSecret: "" + serverPasswords: "" + serverUsers: "" + autopurge: + purgeInterval: 0 + snapRetainCount: 3 + clusterDomain: cluster.local + command: + - /scripts/setup.sh + common: + exampleValue: common-chart + global: + imagePullSecrets: [] + imageRegistry: "" + storageClass: "" + commonAnnotations: {} + commonLabels: {} + configuration: "" + containerPorts: + client: 2181 + election: 3888 + follower: 2888 + tls: 3181 + containerSecurityContext: + enabled: true + runAsNonRoot: true + runAsUser: 1001 + customLivenessProbe: {} + customReadinessProbe: {} + customStartupProbe: {} + dataLogDir: "" + diagnosticMode: + args: + - infinity + command: + - sleep + enabled: false + enabled: true + existingConfigmap: "" + extraDeploy: [] + extraEnvVars: [] + extraEnvVarsCM: "" + extraEnvVarsSecret: "" + extraVolumeMounts: [] + extraVolumes: [] + fourlwCommandsWhitelist: srvr, mntr, ruok + fullnameOverride: "" + global: + imagePullSecrets: [] + imageRegistry: "" + storageClass: "" + heapSize: 1024 + hostAliases: [] + image: + debug: false + pullPolicy: IfNotPresent + pullSecrets: [] + registry: docker.io + repository: bitnami/zookeeper + tag: 3.8.0-debian-10-r63 + initContainers: [] + initLimit: 10 + jvmFlags: "" + kubeVersion: 
"" + lifecycleHooks: {} + listenOnAllIPs: false + livenessProbe: + enabled: true + failureThreshold: 6 + initialDelaySeconds: 30 + periodSeconds: 10 + probeCommandTimeout: 2 + successThreshold: 1 + timeoutSeconds: 5 + logLevel: ERROR + maxClientCnxns: 60 + maxSessionTimeout: 40000 + metrics: + containerPort: 9141 + enabled: false + prometheusRule: + additionalLabels: {} + enabled: false + namespace: "" + rules: [] + service: + annotations: + prometheus.io/path: /metrics + prometheus.io/port: '{{ .Values.metrics.service.port }}' + prometheus.io/scrape: "true" + port: 9141 + type: ClusterIP + serviceMonitor: + additionalLabels: {} + enabled: false + honorLabels: false + interval: "" + jobLabel: "" + metricRelabelings: [] + namespace: "" + relabelings: [] + scrapeTimeout: "" + selector: {} + minServerId: 1 + nameOverride: "" + namespaceOverride: "" + networkPolicy: + allowExternal: true + enabled: false + nodeAffinityPreset: + key: "" + type: "" + values: [] + nodeSelector: {} + pdb: + create: false + maxUnavailable: 1 + minAvailable: "" + persistence: + accessModes: + - ReadWriteOnce + annotations: {} + dataLogDir: + existingClaim: "" + selector: {} + size: 8Gi + enabled: true + existingClaim: "" + selector: {} + size: 8Gi + storageClass: "" + podAffinityPreset: "" + podAnnotations: {} + podAntiAffinityPreset: soft + podLabels: {} + podManagementPolicy: Parallel + podSecurityContext: + enabled: true + fsGroup: 1001 + preAllocSize: 65536 + priorityClassName: "" + readinessProbe: + enabled: true + failureThreshold: 6 + initialDelaySeconds: 5 + periodSeconds: 10 + probeCommandTimeout: 2 + successThreshold: 1 + timeoutSeconds: 5 + replicaCount: 1 + resources: + limits: {} + requests: + cpu: 250m + memory: 256Mi + schedulerName: "" + service: + annotations: {} + clusterIP: "" + disableBaseClientPort: false + externalTrafficPolicy: Cluster + extraPorts: [] + headless: + annotations: {} + publishNotReadyAddresses: true + loadBalancerIP: "" + loadBalancerSourceRanges: [] + nodePorts: + client: "" + tls: "" + ports: + client: 2181 + election: 3888 + follower: 2888 + tls: 3181 + sessionAffinity: None + type: ClusterIP + serviceAccount: + annotations: {} + automountServiceAccountToken: true + create: false + name: "" + sidecars: [] + snapCount: 100000 + startupProbe: + enabled: false + failureThreshold: 15 + initialDelaySeconds: 30 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + syncLimit: 5 + tickTime: 2000 + tls: + client: + auth: none + autoGenerated: false + enabled: false + existingSecret: "" + existingSecretKeystoreKey: "" + existingSecretTruststoreKey: "" + keystorePassword: "" + keystorePath: /opt/bitnami/zookeeper/config/certs/client/zookeeper.keystore.jks + passwordsSecretKeystoreKey: "" + passwordsSecretName: "" + passwordsSecretTruststoreKey: "" + truststorePassword: "" + truststorePath: /opt/bitnami/zookeeper/config/certs/client/zookeeper.truststore.jks + quorum: + auth: none + autoGenerated: false + enabled: false + existingSecret: "" + existingSecretKeystoreKey: "" + existingSecretTruststoreKey: "" + keystorePassword: "" + keystorePath: /opt/bitnami/zookeeper/config/certs/quorum/zookeeper.keystore.jks + passwordsSecretKeystoreKey: "" + passwordsSecretName: "" + passwordsSecretTruststoreKey: "" + truststorePassword: "" + truststorePath: /opt/bitnami/zookeeper/config/certs/quorum/zookeeper.truststore.jks + resources: + limits: {} + requests: {} + tolerations: [] + topologySpreadConstraints: {} + updateStrategy: + rollingUpdate: {} + type: RollingUpdate + volumePermissions: 
+ containerSecurityContext: + runAsUser: 0 + enabled: false + image: + pullPolicy: IfNotPresent + pullSecrets: [] + registry: docker.io + repository: bitnami/bitnami-shell + tag: 10-debian-10-r430 + resources: + limits: {} + requests: {} + zookeeperChrootPath: "" + zookeeperConnectionTimeoutMs: 6000 +mail: + backend: dummy + from: "" + host: "" + password: "" + port: 25 + useSsl: false + useTls: false + username: "" +memcached: + affinity: {} + architecture: standalone + args: + - memcached + - -u memcached + - -p 11211 + - -v + - -m $(MEMCACHED_MEMORY_LIMIT) + - -I $(MEMCACHED_MAX_ITEM_SIZE) + auth: + enabled: false + password: "" + username: "" + autoscaling: + enabled: false + maxReplicas: 6 + minReplicas: 3 + targetCPU: 50 + targetMemory: 50 + clusterDomain: cluster.local + command: [] + common: + exampleValue: common-chart + global: + imagePullSecrets: [] + imageRegistry: "" + storageClass: "" + commonAnnotations: {} + commonLabels: {} + containerPorts: + memcached: 11211 + containerSecurityContext: + enabled: true + runAsNonRoot: true + runAsUser: 1001 + customLivenessProbe: {} + customReadinessProbe: {} + customStartupProbe: {} + diagnosticMode: + args: + - infinity + command: + - sleep + enabled: false + extraDeploy: [] + extraEnvVars: [] + extraEnvVarsCM: sentry-memcached + extraEnvVarsSecret: "" + extraVolumeMounts: [] + extraVolumes: [] + fullnameOverride: "" + global: + imagePullSecrets: [] + imageRegistry: "" + storageClass: "" + hostAliases: [] + image: + debug: false + pullPolicy: IfNotPresent + pullSecrets: [] + registry: docker.io + repository: bitnami/memcached + tag: 1.6.15-debian-11-r10 + initContainers: [] + kubeVersion: "" + lifecycleHooks: {} + livenessProbe: + enabled: true + failureThreshold: 6 + initialDelaySeconds: 30 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 5 + maxItemSize: "26214400" + memoryLimit: "2048" + metrics: + containerPorts: + metrics: 9150 + customLivenessProbe: {} + customReadinessProbe: {} + customStartupProbe: {} + enabled: false + image: + pullPolicy: IfNotPresent + pullSecrets: [] + registry: docker.io + repository: bitnami/memcached-exporter + tag: 0.10.0-debian-11-r2 + livenessProbe: + enabled: true + failureThreshold: 3 + initialDelaySeconds: 15 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 5 + podAnnotations: + prometheus.io/port: '{{ .Values.metrics.containerPorts.metrics }}' + prometheus.io/scrape: "true" + readinessProbe: + enabled: true + failureThreshold: 3 + initialDelaySeconds: 5 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 3 + resources: + limits: {} + requests: {} + service: + annotations: + prometheus.io/port: '{{ .Values.metrics.service.ports.metrics }}' + prometheus.io/scrape: "true" + clusterIP: "" + ports: + metrics: 9150 + sessionAffinity: None + serviceMonitor: + enabled: false + honorLabels: false + interval: "" + jobLabel: "" + labels: {} + metricRelabelings: [] + namespace: "" + relabelings: [] + scrapeTimeout: "" + selector: {} + startupProbe: + enabled: false + failureThreshold: 15 + initialDelaySeconds: 10 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + nameOverride: "" + nodeAffinityPreset: + key: "" + type: "" + values: [] + nodeSelector: {} + pdb: + create: false + maxUnavailable: 1 + minAvailable: "" + persistence: + accessModes: + - ReadWriteOnce + annotations: {} + enabled: false + selector: {} + size: 8Gi + storageClass: "" + podAffinityPreset: "" + podAnnotations: {} + podAntiAffinityPreset: soft + podLabels: {} + podManagementPolicy: Parallel + 
podSecurityContext: + enabled: true + fsGroup: 1001 + priorityClassName: "" + readinessProbe: + enabled: true + failureThreshold: 6 + initialDelaySeconds: 5 + periodSeconds: 5 + successThreshold: 1 + timeoutSeconds: 3 + replicaCount: 1 + resources: + limits: {} + requests: + cpu: 250m + memory: 256Mi + schedulerName: "" + service: + annotations: {} + clusterIP: "" + externalTrafficPolicy: Cluster + extraPorts: [] + loadBalancerIP: "" + loadBalancerSourceRanges: [] + nodePorts: + memcached: "" + ports: + memcached: 11211 + sessionAffinity: None + sessionAffinityConfig: {} + type: ClusterIP + serviceAccount: + annotations: {} + automountServiceAccountToken: true + create: false + name: "" + sidecars: [] + startupProbe: + enabled: false + failureThreshold: 15 + initialDelaySeconds: 30 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + terminationGracePeriodSeconds: "" + tolerations: [] + topologySpreadConstraints: [] + updateStrategy: + rollingUpdate: {} + type: RollingUpdate + volumePermissions: + containerSecurityContext: + runAsUser: 0 + enabled: false + image: + pullPolicy: IfNotPresent + pullSecrets: [] + registry: docker.io + repository: bitnami/bitnami-shell + tag: 11-debian-11-r10 + resources: + limits: {} + requests: {} +metrics: + affinity: {} + enabled: false + image: + pullPolicy: IfNotPresent + repository: prom/statsd-exporter + tag: v0.17.0 + livenessProbe: + enabled: true + failureThreshold: 3 + initialDelaySeconds: 30 + periodSeconds: 5 + successThreshold: 1 + timeoutSeconds: 2 + nodeSelector: {} + readinessProbe: + enabled: true + failureThreshold: 3 + initialDelaySeconds: 30 + periodSeconds: 5 + successThreshold: 1 + timeoutSeconds: 2 + resources: {} + securityContext: {} + service: + labels: {} + type: ClusterIP + serviceMonitor: + additionalLabels: {} + enabled: false + namespace: "" + namespaceSelector: {} + scrapeInterval: 30s + tolerations: [] +nginx: + affinity: {} + args: [] + autoscaling: + enabled: false + maxReplicas: "" + minReplicas: "" + targetCPU: "" + targetMemory: "" + cloneStaticSiteFromGit: + branch: "" + enabled: false + extraEnvVars: [] + extraVolumeMounts: [] + gitClone: + args: [] + command: [] + gitSync: + args: [] + command: [] + image: + pullPolicy: IfNotPresent + pullSecrets: [] + registry: docker.io + repository: bitnami/git + tag: 2.36.1-debian-11-r2 + interval: 60 + repository: "" + clusterDomain: cluster.local + command: [] + common: + exampleValue: common-chart + global: + imagePullSecrets: [] + imageRegistry: "" + commonAnnotations: {} + commonLabels: {} + containerPort: 8080 + containerPorts: + http: 8080 + https: "" + containerSecurityContext: + enabled: false + runAsNonRoot: true + runAsUser: 1001 + customLivenessProbe: {} + customReadinessProbe: {} + customStartupProbe: {} + diagnosticMode: + args: + - infinity + command: + - sleep + enabled: false + enabled: true + existingServerBlockConfigmap: '{{ template "sentry.fullname" . 
}}' + extraDeploy: [] + extraEnvVars: [] + extraEnvVarsCM: "" + extraEnvVarsSecret: "" + extraVolumeMounts: [] + extraVolumes: [] + fullnameOverride: "" + global: + imagePullSecrets: [] + imageRegistry: "" + healthIngress: + annotations: {} + enabled: false + extraHosts: [] + extraPaths: [] + extraRules: [] + extraTls: [] + hostname: example.local + ingressClassName: "" + path: / + pathType: ImplementationSpecific + secrets: [] + selfSigned: false + tls: false + hostAliases: [] + hostIPC: false + hostNetwork: false + image: + debug: false + pullPolicy: IfNotPresent + pullSecrets: [] + registry: docker.io + repository: bitnami/nginx + tag: 1.22.0-debian-11-r3 + ingress: + annotations: {} + apiVersion: "" + enabled: false + extraHosts: [] + extraPaths: [] + extraRules: [] + extraTls: [] + hostname: nginx.local + ingressClassName: "" + path: / + pathType: ImplementationSpecific + secrets: [] + selfSigned: false + tls: false + initContainers: [] + kubeVersion: "" + lifecycleHooks: {} + livenessProbe: + enabled: true + failureThreshold: 6 + initialDelaySeconds: 30 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 5 + metrics: + enabled: false + image: + pullPolicy: IfNotPresent + pullSecrets: [] + registry: docker.io + repository: bitnami/nginx-exporter + tag: 0.10.0-debian-11-r2 + podAnnotations: {} + port: "" + prometheusRule: + additionalLabels: {} + enabled: false + namespace: "" + rules: [] + resources: + limits: {} + requests: {} + securityContext: + enabled: false + runAsUser: 1001 + service: + annotations: + prometheus.io/port: '{{ .Values.metrics.service.port }}' + prometheus.io/scrape: "true" + port: 9113 + serviceMonitor: + enabled: false + honorLabels: false + interval: "" + jobLabel: "" + labels: {} + metricRelabelings: [] + namespace: "" + relabelings: [] + scrapeTimeout: "" + selector: {} + nameOverride: "" + namespaceOverride: "" + nodeAffinityPreset: + key: "" + type: "" + values: [] + nodeSelector: {} + pdb: + create: false + maxUnavailable: 0 + minAvailable: 1 + podAffinityPreset: "" + podAnnotations: {} + podAntiAffinityPreset: soft + podLabels: {} + podSecurityContext: + enabled: false + fsGroup: 1001 + sysctls: [] + priorityClassName: "" + readinessProbe: + enabled: true + failureThreshold: 3 + initialDelaySeconds: 5 + periodSeconds: 5 + successThreshold: 1 + timeoutSeconds: 3 + replicaCount: 1 + resources: + limits: {} + requests: {} + schedulerName: "" + serverBlock: "" + service: + annotations: {} + clusterIP: "" + externalTrafficPolicy: Cluster + extraPorts: [] + loadBalancerIP: "" + loadBalancerSourceRanges: [] + nodePorts: + http: "" + https: "" + ports: + http: 80 + https: 443 + sessionAffinity: None + sessionAffinityConfig: {} + targetPort: + http: http + https: https + type: ClusterIP + serviceAccount: + annotations: {} + automountServiceAccountToken: false + create: false + name: "" + sidecarSingleProcessNamespace: false + sidecars: [] + startupProbe: + enabled: false + failureThreshold: 6 + initialDelaySeconds: 30 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 5 + staticSiteConfigmap: "" + staticSitePVC: "" + terminationGracePeriodSeconds: "" + tolerations: [] + topologySpreadConstraints: [] + updateStrategy: + rollingUpdate: {} + type: RollingUpdate +postgresql: + audit: + clientMinMessages: error + logConnections: false + logDisconnections: false + logHostname: false + logLinePrefix: "" + logTimezone: "" + pgAuditLog: "" + pgAuditLogCatalog: "off" + common: + exampleValue: common-chart + global: + imagePullSecrets: [] + imageRegistry: "" 
+ postgresql: + existingSecret: "" + postgresqlDatabase: "" + postgresqlPassword: "" + postgresqlUsername: "" + replicationPassword: "" + servicePort: "" + storageClass: "" + commonAnnotations: {} + commonLabels: {} + configurationConfigMap: "" + containerPorts: + postgresql: 5432 + containerSecurityContext: + enabled: true + runAsUser: 1001 + customLivenessProbe: {} + customReadinessProbe: {} + customStartupProbe: {} + diagnosticMode: + args: + - infinity + command: + - sleep + enabled: false + enabled: true + existingSecret: "" + extendedConfConfigMap: "" + extraDeploy: [] + extraEnv: [] + extraEnvVarsCM: "" + fullnameOverride: "" + global: + imagePullSecrets: [] + imageRegistry: "" + postgresql: + existingSecret: "" + postgresqlDatabase: "" + postgresqlPassword: "" + postgresqlUsername: "" + replicationPassword: "" + servicePort: "" + storageClass: "" + image: + debug: false + pullPolicy: IfNotPresent + pullSecrets: [] + registry: docker.io + repository: bitnami/postgresql + tag: 11.14.0-debian-10-r28 + initdbPassword: "" + initdbScripts: {} + initdbScriptsConfigMap: "" + initdbScriptsSecret: "" + initdbUser: "" + ldap: + baseDN: "" + bind_password: "" + bindDN: "" + enabled: false + port: "" + prefix: "" + scheme: "" + search_attr: "" + search_filter: "" + server: "" + suffix: "" + tls: "" + url: "" + lifecycleHooks: {} + livenessProbe: + enabled: true + failureThreshold: 6 + initialDelaySeconds: 30 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 5 + metrics: + customMetrics: {} + enabled: false + extraEnvVars: [] + image: + pullPolicy: IfNotPresent + pullSecrets: [] + registry: docker.io + repository: bitnami/postgres-exporter + tag: 0.10.0-debian-10-r172 + livenessProbe: + enabled: true + failureThreshold: 6 + initialDelaySeconds: 5 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 5 + prometheusRule: + additionalLabels: {} + enabled: false + namespace: "" + rules: [] + readinessProbe: + enabled: true + failureThreshold: 6 + initialDelaySeconds: 5 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 5 + resources: {} + securityContext: + enabled: false + runAsUser: 1001 + service: + annotations: + prometheus.io/port: "9187" + prometheus.io/scrape: "true" + loadBalancerIP: "" + type: ClusterIP + serviceMonitor: + additionalLabels: {} + enabled: false + interval: "" + metricRelabelings: [] + namespace: "" + relabelings: [] + scrapeTimeout: "" + nameOverride: sentry-postgresql + networkPolicy: + allowExternal: true + enabled: false + explicitNamespacesSelector: {} + persistence: + accessModes: + - ReadWriteOnce + annotations: {} + enabled: true + existingClaim: "" + mountPath: /bitnami/postgresql + selector: {} + size: 200Gi + snapshotName: "" + storageClass: "" + subPath: "" + pgHbaConfiguration: "" + postgresqlConfiguration: {} + postgresqlDataDir: /bitnami/postgresql/data + postgresqlDatabase: sentry + postgresqlDbUserConnectionLimit: "" + postgresqlExtendedConf: {} + postgresqlInitdbArgs: "" + postgresqlInitdbWalDir: "" + postgresqlMaxConnections: "" + postgresqlPassword: "" + postgresqlPghbaRemoveFilters: "" + postgresqlPostgresConnectionLimit: "" + postgresqlPostgresPassword: "" + postgresqlSharedPreloadLibraries: pgaudit + postgresqlStatementTimeout: "" + postgresqlTcpKeepalivesCount: "" + postgresqlTcpKeepalivesIdle: "" + postgresqlTcpKeepalivesInterval: "" + postgresqlUsername: postgres + primary: + affinity: {} + annotations: {} + extraInitContainers: [] + extraPodSpec: {} + extraVolumeMounts: [] + extraVolumes: [] + labels: {} + 
nodeAffinityPreset: + key: "" + type: "" + values: [] + nodeSelector: {} + podAffinityPreset: "" + podAnnotations: {} + podAntiAffinityPreset: soft + podLabels: {} + priorityClassName: "" + service: + clusterIP: "" + nodePort: "" + type: "" + sidecars: [] + tolerations: [] + primaryAsStandBy: + enabled: false + primaryHost: "" + primaryPort: "" + psp: + create: false + rbac: + create: false + readReplicas: + affinity: {} + annotations: {} + extraInitContainers: [] + extraPodSpec: {} + extraVolumeMounts: [] + extraVolumes: [] + labels: {} + nodeAffinityPreset: + key: "" + type: "" + values: [] + nodeSelector: {} + persistence: + enabled: true + podAffinityPreset: "" + podAnnotations: {} + podAntiAffinityPreset: soft + podLabels: {} + priorityClassName: "" + resources: {} + service: + clusterIP: "" + nodePort: "" + type: "" + sidecars: [] + tolerations: [] + topologySpreadConstraints: [] + readinessProbe: + enabled: true + failureThreshold: 6 + initialDelaySeconds: 5 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 5 + replication: + applicationName: sentry + enabled: false + numSynchronousReplicas: 1 + password: repl_password + readReplicas: 2 + singleService: true + synchronousCommit: "on" + uniqueServices: false + user: repl_user + resources: + requests: + cpu: 250m + memory: 512Mi + schedulerName: "" + securityContext: + enabled: true + fsGroup: 1001 + service: + annotations: {} + clusterIP: "" + externalTrafficPolicy: Cluster + loadBalancerIP: "" + loadBalancerSourceRanges: [] + nodePort: "" + port: 5432 + type: ClusterIP + serviceAccount: + autoMount: false + enabled: false + name: "" + shmVolume: + chmod: + enabled: true + enabled: true + sizeLimit: "" + startupProbe: + enabled: false + failureThreshold: 10 + initialDelaySeconds: 30 + periodSeconds: 15 + successThreshold: 1 + timeoutSeconds: 5 + terminationGracePeriodSeconds: "" + tls: + autoGenerated: false + certCAFilename: "" + certFilename: "" + certKeyFilename: "" + certificatesSecret: "" + crlFilename: "" + enabled: false + preferServerCiphers: true + updateStrategy: + type: RollingUpdate + usePasswordFile: false + volumePermissions: + enabled: false + image: + pullPolicy: IfNotPresent + pullSecrets: [] + registry: docker.io + repository: bitnami/bitnami-shell + tag: 10-debian-10-r305 + securityContext: + runAsUser: 0 +prefix: null +rabbitmq: + advancedConfiguration: "" + affinity: {} + args: [] + auth: + erlangCookie: pHgpy3Q6adTskzAT6bLHCFqFTF7lMxhA + existingErlangSecret: "" + existingPasswordSecret: "" + password: guest + tls: + autoGenerated: false + caCertificate: "" + enabled: false + existingSecret: "" + existingSecretFullChain: false + failIfNoPeerCert: true + serverCertificate: "" + serverKey: "" + sslOptionsVerify: verify_peer + username: guest + clusterDomain: cluster.local + clustering: + addressType: hostname + enabled: true + forceBoot: true + partitionHandling: autoheal + rebalance: true + command: [] + common: + exampleValue: common-chart + global: + imagePullSecrets: [] + imageRegistry: "" + storageClass: "" + commonAnnotations: {} + communityPlugins: "" + configuration: |- + ## Username and password + ## + default_user = {{ .Values.auth.username }} + default_pass = CHANGEME + {{- if .Values.clustering.enabled }} + ## Clustering + ## + cluster_formation.peer_discovery_backend = rabbit_peer_discovery_k8s + cluster_formation.k8s.host = kubernetes.default.svc.{{ .Values.clusterDomain }} + cluster_formation.node_cleanup.interval = 10 + cluster_formation.node_cleanup.only_log_warning = true + 
cluster_partition_handling = {{ .Values.clustering.partitionHandling }} + {{- end }} + {{- if .Values.loadDefinition.enabled }} + load_definitions = {{ .Values.loadDefinition.file }} + {{- end }} + # queue master locator + queue_master_locator = min-masters + # enable guest user + loopback_users.guest = false + {{ tpl .Values.extraConfiguration . }} + {{- if .Values.auth.tls.enabled }} + ssl_options.verify = {{ .Values.auth.tls.sslOptionsVerify }} + listeners.ssl.default = {{ .Values.service.tlsPort }} + ssl_options.fail_if_no_peer_cert = {{ .Values.auth.tls.failIfNoPeerCert }} + ssl_options.cacertfile = /opt/bitnami/rabbitmq/certs/ca_certificate.pem + ssl_options.certfile = /opt/bitnami/rabbitmq/certs/server_certificate.pem + ssl_options.keyfile = /opt/bitnami/rabbitmq/certs/server_key.pem + {{- end }} + {{- if .Values.ldap.enabled }} + auth_backends.1 = rabbit_auth_backend_ldap + auth_backends.2 = internal + {{- range $index, $server := .Values.ldap.servers }} + auth_ldap.servers.{{ add $index 1 }} = {{ $server }} + {{- end }} + auth_ldap.port = {{ .Values.ldap.port }} + auth_ldap.user_dn_pattern = {{ .Values.ldap.user_dn_pattern }} + {{- if .Values.ldap.tls.enabled }} + auth_ldap.use_ssl = true + {{- end }} + {{- end }} + {{- if .Values.metrics.enabled }} + ## Prometheus metrics + ## + prometheus.tcp.port = 9419 + {{- end }} + {{- if .Values.memoryHighWatermark.enabled }} + ## Memory Threshold + ## + total_memory_available_override_value = {{ include "rabbitmq.toBytes" .Values.resources.limits.memory }} + vm_memory_high_watermark.{{ .Values.memoryHighWatermark.type }} = {{ .Values.memoryHighWatermark.value }} + {{- end }} + containerSecurityContext: {} + customLivenessProbe: {} + customReadinessProbe: {} + customStartupProbe: {} + diagnosticMode: + args: + - infinity + command: + - sleep + enabled: false + dnsConfig: {} + dnsPolicy: "" + enabled: true + extraConfiguration: | + load_definitions = /app/load_definition.json + extraContainerPorts: [] + extraDeploy: [] + extraEnvVars: [] + extraEnvVarsCM: "" + extraEnvVarsSecret: "" + extraPlugins: rabbitmq_auth_backend_ldap + extraSecrets: + load-definition: + load_definition.json: | + { + "users": [ + { + "name": "{{ .Values.auth.username }}", + "password": "{{ .Values.auth.password }}", + "tags": "administrator" + } + ], + "permissions": [{ + "user": "{{ .Values.auth.username }}", + "vhost": "/", + "configure": ".*", + "write": ".*", + "read": ".*" + }], + "policies": [ + { + "name": "ha-all", + "pattern": ".*", + "vhost": "/", + "definition": { + "ha-mode": "all", + "ha-sync-mode": "automatic", + "ha-sync-batch-size": 1 + } + } + ], + "vhosts": [ + { + "name": "/" + } + ] + } + extraSecretsPrependReleaseName: false + extraVolumeMounts: [] + extraVolumes: [] + fullnameOverride: "" + global: + imagePullSecrets: [] + imageRegistry: "" + storageClass: "" + hostAliases: [] + image: + debug: false + pullPolicy: IfNotPresent + pullSecrets: [] + registry: docker.io + repository: bitnami/rabbitmq + tag: 3.9.16-debian-10-r0 + ingress: + annotations: {} + enabled: false + extraHosts: [] + extraRules: [] + extraTls: [] + hostname: rabbitmq.local + ingressClassName: "" + path: / + pathType: ImplementationSpecific + secrets: [] + selfSigned: false + tls: false + initContainers: [] + kubeVersion: "" + ldap: + enabled: false + port: "389" + servers: [] + tls: + enabled: false + user_dn_pattern: cn=${username},dc=example,dc=org + livenessProbe: + enabled: true + failureThreshold: 6 + initialDelaySeconds: 120 + periodSeconds: 30 + successThreshold: 1 + 
timeoutSeconds: 20 + loadDefinition: + enabled: true + existingSecret: load-definition + file: /app/load_definition.json + logs: '-' + maxAvailableSchedulers: "" + memoryHighWatermark: + enabled: false + type: relative + value: 0.4 + metrics: + enabled: false + plugins: rabbitmq_prometheus + podAnnotations: + prometheus.io/port: '{{ .Values.service.metricsPort }}' + prometheus.io/scrape: "true" + prometheusRule: + additionalLabels: {} + enabled: false + namespace: "" + rules: [] + serviceMonitor: + additionalLabels: {} + enabled: false + honorLabels: false + interval: 30s + metricRelabelings: [] + namespace: "" + path: "" + podTargetLabels: {} + relabelings: [] + relabellings: [] + scrapeTimeout: "" + targetLabels: {} + nameOverride: "" + networkPolicy: + additionalRules: [] + allowExternal: true + enabled: false + nodeAffinityPreset: + key: "" + type: "" + values: [] + nodeSelector: {} + onlineSchedulers: "" + pdb: + create: true + maxUnavailable: "" + minAvailable: 1 + persistence: + accessMode: ReadWriteOnce + annotations: {} + enabled: true + existingClaim: "" + mountPath: /bitnami/rabbitmq/mnesia + selector: {} + size: 8Gi + storageClass: "" + subPath: "" + volumes: [] + plugins: rabbitmq_management rabbitmq_peer_discovery_k8s + podAffinityPreset: "" + podAnnotations: {} + podAntiAffinityPreset: soft + podLabels: {} + podManagementPolicy: OrderedReady + podSecurityContext: + enabled: true + fsGroup: 1001 + runAsUser: 1001 + priorityClassName: "" + rbac: + create: true + readinessProbe: + enabled: true + failureThreshold: 3 + initialDelaySeconds: 10 + periodSeconds: 30 + successThreshold: 1 + timeoutSeconds: 20 + replicaCount: 3 + resources: + limits: {} + requests: {} + schedulerName: "" + service: + annotations: {} + annotationsHeadless: {} + distNodePort: "" + distPort: 25672 + distPortEnabled: true + distPortName: dist + epmdNodePort: "" + epmdPortEnabled: true + epmdPortName: epmd + externalIPs: [] + externalTrafficPolicy: Cluster + extraPorts: [] + labels: {} + loadBalancerIP: "" + loadBalancerSourceRanges: [] + managerNodePort: "" + managerPort: 15672 + managerPortEnabled: true + managerPortName: http-stats + metricsNodePort: "" + metricsPort: 9419 + metricsPortName: metrics + nodePort: "" + port: 5672 + portEnabled: true + portName: amqp + tlsNodePort: "" + tlsPort: 5671 + tlsPortName: amqp-ssl + type: ClusterIP + serviceAccount: + automountServiceAccountToken: true + create: true + name: "" + sidecars: [] + statefulsetLabels: {} + terminationGracePeriodSeconds: 120 + tolerations: [] + topologySpreadConstraints: [] + ulimitNofiles: "65536" + updateStrategyType: RollingUpdate + volumePermissions: + enabled: false + image: + pullPolicy: IfNotPresent + pullSecrets: [] + registry: docker.io + repository: bitnami/bitnami-shell + tag: 10-debian-10-r408 + resources: + limits: {} + requests: {} +redis: + architecture: replication + auth: + enabled: false + existingSecret: "" + existingSecretPasswordKey: "" + password: "" + sentinel: false + usePasswordFiles: false + clusterDomain: cluster.local + common: + exampleValue: common-chart + global: + imagePullSecrets: [] + imageRegistry: "" + redis: + password: "" + storageClass: "" + commonAnnotations: {} + commonConfiguration: |- + # Enable AOF https://redis.io/topics/persistence#append-only-file + appendonly yes + # Disable RDB persistence, AOF persistence already enabled. 
+ save "" + # max memory + maxmemory 4096MB + # + maxmemory-policy volatile-lru + commonLabels: {} + diagnosticMode: + args: + - infinity + command: + - sleep + enabled: false + enabled: true + existingConfigmap: "" + extraDeploy: [] + fullnameOverride: "" + global: + imagePullSecrets: [] + imageRegistry: "" + redis: + password: "" + storageClass: "" + image: + debug: false + pullPolicy: IfNotPresent + pullSecrets: [] + registry: docker.io + repository: bitnami/redis + tag: 6.2.7-debian-11-r3 + kubeVersion: "" + master: + affinity: {} + args: [] + command: [] + configuration: "" + containerPorts: + redis: 6379 + containerSecurityContext: + enabled: true + runAsUser: 1001 + count: 1 + customLivenessProbe: {} + customReadinessProbe: {} + customStartupProbe: {} + disableCommands: + - FLUSHDB + - FLUSHALL + dnsConfig: {} + dnsPolicy: "" + extraEnvVars: [] + extraEnvVarsCM: "" + extraEnvVarsSecret: "" + extraFlags: [] + extraVolumeMounts: [] + extraVolumes: [] + hostAliases: [] + initContainers: [] + kind: StatefulSet + lifecycleHooks: {} + livenessProbe: + enabled: true + failureThreshold: 5 + initialDelaySeconds: 20 + periodSeconds: 5 + successThreshold: 1 + timeoutSeconds: 5 + nodeAffinityPreset: + key: "" + type: "" + values: [] + nodeSelector: {} + persistence: + accessModes: + - ReadWriteOnce + annotations: {} + dataSource: {} + enabled: true + existingClaim: "" + medium: "" + path: /data + selector: {} + size: 8Gi + sizeLimit: "" + storageClass: "" + subPath: "" + podAffinityPreset: "" + podAnnotations: {} + podAntiAffinityPreset: soft + podLabels: {} + podSecurityContext: + enabled: true + fsGroup: 1001 + preExecCmds: [] + priorityClassName: "" + readinessProbe: + enabled: true + failureThreshold: 5 + initialDelaySeconds: 20 + periodSeconds: 5 + successThreshold: 1 + timeoutSeconds: 1 + resources: + limits: {} + requests: {} + schedulerName: "" + service: + annotations: {} + clusterIP: "" + externalTrafficPolicy: Cluster + extraPorts: [] + internalTrafficPolicy: Cluster + loadBalancerIP: "" + loadBalancerSourceRanges: [] + nodePorts: + redis: "" + ports: + redis: 6379 + sessionAffinity: None + sessionAffinityConfig: {} + type: ClusterIP + shareProcessNamespace: false + sidecars: [] + startupProbe: + enabled: false + failureThreshold: 5 + initialDelaySeconds: 20 + periodSeconds: 5 + successThreshold: 1 + timeoutSeconds: 5 + terminationGracePeriodSeconds: 30 + tolerations: [] + topologySpreadConstraints: [] + updateStrategy: + rollingUpdate: {} + type: RollingUpdate + metrics: + command: [] + containerSecurityContext: + enabled: true + runAsUser: 1001 + enabled: false + extraArgs: {} + extraEnvVars: [] + extraVolumeMounts: [] + extraVolumes: [] + image: + pullPolicy: IfNotPresent + pullSecrets: [] + registry: docker.io + repository: bitnami/redis-exporter + tag: 1.40.0-debian-11-r0 + podAnnotations: + prometheus.io/port: "9121" + prometheus.io/scrape: "true" + podLabels: {} + prometheusRule: + additionalLabels: {} + enabled: false + namespace: "" + rules: [] + redisTargetHost: localhost + resources: + limits: {} + requests: {} + service: + annotations: {} + externalTrafficPolicy: Cluster + extraPorts: [] + loadBalancerIP: "" + loadBalancerSourceRanges: [] + port: 9121 + type: ClusterIP + serviceMonitor: + additionalLabels: {} + enabled: false + honorLabels: false + interval: 30s + metricRelabelings: [] + namespace: "" + relabellings: [] + scrapeTimeout: "" + nameOverride: sentry-redis + networkPolicy: + allowExternal: true + enabled: false + extraEgress: [] + extraIngress: [] + 
ingressNSMatchLabels: {} + ingressNSPodMatchLabels: {} + pdb: + create: false + maxUnavailable: "" + minAvailable: 1 + podSecurityPolicy: + create: false + enabled: false + rbac: + create: false + rules: [] + replica: + affinity: {} + args: [] + autoscaling: + enabled: false + maxReplicas: 11 + minReplicas: 1 + targetCPU: "" + targetMemory: "" + command: [] + configuration: "" + containerPorts: + redis: 6379 + containerSecurityContext: + enabled: true + runAsUser: 1001 + customLivenessProbe: {} + customReadinessProbe: {} + customStartupProbe: {} + disableCommands: + - FLUSHDB + - FLUSHALL + dnsConfig: {} + dnsPolicy: "" + externalMaster: + enabled: false + host: "" + port: 6379 + extraEnvVars: [] + extraEnvVarsCM: "" + extraEnvVarsSecret: "" + extraFlags: [] + extraVolumeMounts: [] + extraVolumes: [] + hostAliases: [] + initContainers: [] + lifecycleHooks: {} + livenessProbe: + enabled: true + failureThreshold: 5 + initialDelaySeconds: 20 + periodSeconds: 5 + successThreshold: 1 + timeoutSeconds: 5 + nodeAffinityPreset: + key: "" + type: "" + values: [] + nodeSelector: {} + persistence: + accessModes: + - ReadWriteOnce + annotations: {} + dataSource: {} + enabled: true + existingClaim: "" + medium: "" + path: /data + selector: {} + size: 8Gi + sizeLimit: "" + storageClass: "" + subPath: "" + podAffinityPreset: "" + podAnnotations: {} + podAntiAffinityPreset: soft + podLabels: {} + podManagementPolicy: "" + podSecurityContext: + enabled: true + fsGroup: 1001 + preExecCmds: [] + priorityClassName: "" + readinessProbe: + enabled: true + failureThreshold: 5 + initialDelaySeconds: 20 + periodSeconds: 5 + successThreshold: 1 + timeoutSeconds: 1 + replicaCount: 3 + resources: + limits: {} + requests: {} + schedulerName: "" + service: + annotations: {} + clusterIP: "" + externalTrafficPolicy: Cluster + extraPorts: [] + internalTrafficPolicy: Cluster + loadBalancerIP: "" + loadBalancerSourceRanges: [] + nodePorts: + redis: "" + ports: + redis: 6379 + sessionAffinity: None + sessionAffinityConfig: {} + type: ClusterIP + shareProcessNamespace: false + sidecars: [] + startupProbe: + enabled: true + failureThreshold: 22 + initialDelaySeconds: 10 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 5 + terminationGracePeriodSeconds: 30 + tolerations: [] + topologySpreadConstraints: [] + updateStrategy: + rollingUpdate: {} + type: RollingUpdate + secretAnnotations: {} + sentinel: + args: [] + automateClusterRecovery: false + command: [] + configuration: "" + containerPorts: + sentinel: 26379 + containerSecurityContext: + enabled: true + runAsUser: 1001 + customLivenessProbe: {} + customReadinessProbe: {} + customStartupProbe: {} + downAfterMilliseconds: 60000 + enabled: false + externalMaster: + enabled: false + host: "" + port: 6379 + extraEnvVars: [] + extraEnvVarsCM: "" + extraEnvVarsSecret: "" + extraVolumeMounts: [] + extraVolumes: [] + failoverTimeout: 18000 + getMasterTimeout: 220 + image: + debug: false + pullPolicy: IfNotPresent + pullSecrets: [] + registry: docker.io + repository: bitnami/redis-sentinel + tag: 6.2.7-debian-11-r4 + lifecycleHooks: {} + livenessProbe: + enabled: true + failureThreshold: 5 + initialDelaySeconds: 20 + periodSeconds: 5 + successThreshold: 1 + timeoutSeconds: 5 + masterSet: mymaster + parallelSyncs: 1 + persistence: + accessModes: + - ReadWriteOnce + annotations: {} + dataSource: {} + enabled: false + medium: "" + selector: {} + size: 100Mi + storageClass: "" + preExecCmds: [] + quorum: 2 + readinessProbe: + enabled: true + failureThreshold: 5 + 
initialDelaySeconds: 20 + periodSeconds: 5 + successThreshold: 1 + timeoutSeconds: 1 + resources: + limits: {} + requests: {} + service: + annotations: {} + clusterIP: "" + externalTrafficPolicy: Cluster + extraPorts: [] + loadBalancerIP: "" + loadBalancerSourceRanges: [] + nodePorts: + redis: "" + sentinel: "" + ports: + redis: 6379 + sentinel: 26379 + sessionAffinity: None + sessionAffinityConfig: {} + type: ClusterIP + startupProbe: + enabled: true + failureThreshold: 22 + initialDelaySeconds: 10 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 5 + terminationGracePeriodSeconds: 30 + serviceAccount: + annotations: {} + automountServiceAccountToken: true + create: true + name: "" + sysctl: + command: [] + enabled: false + image: + pullPolicy: IfNotPresent + pullSecrets: [] + registry: docker.io + repository: bitnami/bitnami-shell + tag: 11-debian-11-r3 + mountHostSys: false + resources: + limits: {} + requests: {} + tls: + authClients: true + autoGenerated: false + certCAFilename: "" + certFilename: "" + certKeyFilename: "" + certificatesSecret: "" + dhParamsFilename: "" + enabled: false + existingSecret: "" + useExternalDNS: + additionalAnnotations: {} + annotationKey: external-dns.alpha.kubernetes.io/ + enabled: false + suffix: "" + usePassword: false + volumePermissions: + containerSecurityContext: + runAsUser: 0 + enabled: false + image: + pullPolicy: IfNotPresent + pullSecrets: [] + registry: docker.io + repository: bitnami/bitnami-shell + tag: 11-debian-11-r3 + resources: + limits: {} + requests: {} +relay: + affinity: {} + autoscaling: + enabled: false + maxReplicas: 5 + minReplicas: 2 + targetCPUUtilizationPercentage: 50 + env: [] + mode: managed + nodeSelector: {} + probeFailureThreshold: 5 + probeInitialDelaySeconds: 10 + probePeriodSeconds: 10 + probeSuccessThreshold: 1 + probeTimeoutSeconds: 2 + replicas: 1 + resources: {} + securityContext: {} + service: + annotations: {} + sidecars: [] + volumes: [] +revisionHistoryLimit: 10 +sentry: + cleanup: + activeDeadlineSeconds: 100 + concurrencyPolicy: Allow + days: 15 + enabled: true + failedJobsHistoryLimit: 5 + schedule: 0 0 * * * + serviceAccount: {} + sidecars: [] + successfulJobsHistoryLimit: 5 + volumes: [] + cron: + affinity: {} + env: [] + nodeSelector: {} + replicas: 1 + resources: {} + sidecars: [] + volumes: [] + features: + orgSubdomains: false + vstsLimitedScopes: true + ingestConsumer: + affinity: {} + autoscaling: + enabled: false + maxReplicas: 3 + minReplicas: 1 + targetCPUUtilizationPercentage: 50 + env: [] + nodeSelector: {} + replicas: 1 + resources: {} + securityContext: {} + sidecars: [] + volumes: [] + postProcessForward: + affinity: {} + env: [] + nodeSelector: {} + replicas: 1 + resources: {} + securityContext: {} + sidecars: [] + volumes: [] + singleOrganization: true + subscriptionConsumerEvents: + affinity: {} + env: [] + nodeSelector: {} + replicas: 1 + resources: {} + securityContext: {} + sidecars: [] + volumes: [] + subscriptionConsumerTransactions: + affinity: {} + env: [] + nodeSelector: {} + replicas: 1 + resources: {} + securityContext: {} + sidecars: [] + volumes: [] + web: + affinity: {} + autoscaling: + enabled: false + maxReplicas: 5 + minReplicas: 2 + targetCPUUtilizationPercentage: 50 + env: [] + nodeSelector: {} + probeFailureThreshold: 5 + probeInitialDelaySeconds: 10 + probePeriodSeconds: 10 + probeSuccessThreshold: 1 + probeTimeoutSeconds: 2 + replicas: 1 + resources: {} + securityContext: {} + service: + annotations: {} + sidecars: [] + strategyType: RollingUpdate + volumes: 
[] + worker: + affinity: {} + autoscaling: + enabled: false + maxReplicas: 5 + minReplicas: 2 + targetCPUUtilizationPercentage: 50 + env: [] + livenessProbe: + enabled: false + failureThreshold: 3 + periodSeconds: 60 + timeoutSeconds: 10 + nodeSelector: {} + replicas: 3 + resources: {} + sidecars: [] + volumes: [] +service: + annotations: {} + externalPort: 9000 + name: sentry + type: ClusterIP +serviceAccount: + annotations: {} + automountServiceAccountToken: true + enabled: false + name: sentry +slack: {} +snuba: + api: + affinity: {} + autoscaling: + enabled: false + maxReplicas: 5 + minReplicas: 2 + targetCPUUtilizationPercentage: 50 + command: {} + env: [] + liveness: + timeoutSeconds: 2 + nodeSelector: {} + probeInitialDelaySeconds: 10 + readiness: + timeoutSeconds: 2 + replicas: 1 + resources: {} + securityContext: {} + service: + annotations: {} + sidecars: [] + volumes: [] + cleanupErrors: + activeDeadlineSeconds: 100 + concurrencyPolicy: Allow + enabled: true + schedule: 0 * * * * + serviceAccount: {} + sidecars: [] + successfulJobsHistoryLimit: 5 + volumes: [] + cleanupTransactions: + activeDeadlineSeconds: 100 + concurrencyPolicy: Allow + enabled: true + failedJobsHistoryLimit: 5 + schedule: 0 * * * * + serviceAccount: {} + sidecars: [] + successfulJobsHistoryLimit: 5 + volumes: [] + consumer: + affinity: {} + autoOffsetReset: earliest + env: [] + nodeSelector: {} + replicas: 1 + resources: {} + securityContext: {} + dbInitJob: + env: [] + migrateJob: + env: [] + outcomesConsumer: + affinity: {} + autoOffsetReset: earliest + env: [] + maxBatchSize: "3" + nodeSelector: {} + replicas: 1 + resources: {} + securityContext: {} + replacer: + affinity: {} + autoOffsetReset: earliest + env: [] + maxBatchSize: "3" + nodeSelector: {} + replicas: 1 + resources: {} + securityContext: {} + sessionsConsumer: + affinity: {} + autoOffsetReset: earliest + env: [] + nodeSelector: {} + replicas: 1 + resources: {} + securityContext: {} + subscriptionConsumerEvents: + affinity: {} + autoOffsetReset: earliest + env: [] + nodeSelector: {} + replicas: 1 + resources: {} + securityContext: {} + subscriptionConsumerTransactions: + affinity: {} + autoOffsetReset: earliest + env: [] + nodeSelector: {} + replicas: 1 + resources: {} + securityContext: {} + transactionsConsumer: + affinity: {} + autoOffsetReset: earliest + env: [] + nodeSelector: {} + replicas: 1 + resources: {} + securityContext: {} +sourcemaps: + enabled: false +symbolicator: + api: + affinity: {} + autoscaling: + enabled: false + maxReplicas: 5 + minReplicas: 2 + targetCPUUtilizationPercentage: 50 + config: |- + # See: https://getsentry.github.io/symbolicator/#configuration + cache_dir: "/data" + bind: "0.0.0.0:3021" + logging: + level: "warn" + metrics: + statsd: null + prefix: "symbolicator" + sentry_dsn: null + connect_to_reserved_ips: true + # caches: + # downloaded: + # max_unused_for: 1w + # retry_misses_after: 5m + # retry_malformed_after: 5m + # derived: + # max_unused_for: 1w + # retry_misses_after: 5m + # retry_malformed_after: 5m + # diagnostics: + # retention: 1w + env: [] + nodeSelector: {} + probeInitialDelaySeconds: 10 + replicas: 1 + resources: {} + securityContext: {} + cleanup: + enabled: false + enabled: false +system: + adminEmail: "devops@vikadata.com" + public: false + url: "" +user: + create: true + email: "devops@vikadata@com" + password: "nAqRbbknfeTYh9maF98r" +zookeeper: + affinity: {} + args: [] + auth: + clientPassword: "" + clientUser: "" + enabled: false + existingSecret: "" + serverPasswords: "" + 
serverUsers: "" + autopurge: + purgeInterval: 0 + snapRetainCount: 3 + clusterDomain: cluster.local + command: + - /scripts/setup.sh + common: + exampleValue: common-chart + global: + imagePullSecrets: [] + imageRegistry: "" + storageClass: "" + commonAnnotations: {} + commonLabels: {} + configuration: "" + containerPorts: + client: 2181 + election: 3888 + follower: 2888 + tls: 3181 + containerSecurityContext: + enabled: true + runAsNonRoot: true + runAsUser: 1001 + customLivenessProbe: {} + customReadinessProbe: {} + customStartupProbe: {} + dataLogDir: "" + diagnosticMode: + args: + - infinity + command: + - sleep + enabled: false + enabled: true + existingConfigmap: "" + extraDeploy: [] + extraEnvVars: [] + extraEnvVarsCM: "" + extraEnvVarsSecret: "" + extraVolumeMounts: [] + extraVolumes: [] + fourlwCommandsWhitelist: srvr, mntr, ruok + fullnameOverride: "" + global: + imagePullSecrets: [] + imageRegistry: "" + storageClass: "" + heapSize: 1024 + hostAliases: [] + image: + debug: false + pullPolicy: IfNotPresent + pullSecrets: [] + registry: docker.io + repository: bitnami/zookeeper + tag: 3.8.0-debian-10-r0 + initContainers: [] + initLimit: 10 + jvmFlags: "" + kubeVersion: "" + lifecycleHooks: {} + listenOnAllIPs: false + livenessProbe: + enabled: true + failureThreshold: 6 + initialDelaySeconds: 30 + periodSeconds: 10 + probeCommandTimeout: 2 + successThreshold: 1 + timeoutSeconds: 5 + logLevel: ERROR + maxClientCnxns: 60 + maxSessionTimeout: 40000 + metrics: + containerPort: 9141 + enabled: false + prometheusRule: + additionalLabels: {} + enabled: false + namespace: "" + rules: [] + service: + annotations: + prometheus.io/path: /metrics + prometheus.io/port: '{{ .Values.metrics.service.port }}' + prometheus.io/scrape: "true" + port: 9141 + type: ClusterIP + serviceMonitor: + additionalLabels: {} + enabled: false + honorLabels: false + interval: "" + jobLabel: "" + metricRelabelings: [] + namespace: "" + relabelings: [] + scrapeTimeout: "" + selector: {} + minServerId: 1 + nameOverride: zookeeper-clickhouse + namespaceOverride: "" + networkPolicy: + allowExternal: true + enabled: false + nodeAffinityPreset: + key: "" + type: "" + values: [] + nodeSelector: {} + pdb: + create: false + maxUnavailable: 1 + minAvailable: "" + persistence: + accessModes: + - ReadWriteOnce + annotations: {} + dataLogDir: + existingClaim: "" + selector: {} + size: 8Gi + enabled: true + existingClaim: "" + selector: {} + size: 8Gi + storageClass: "" + podAffinityPreset: "" + podAnnotations: {} + podAntiAffinityPreset: soft + podLabels: {} + podManagementPolicy: Parallel + podSecurityContext: + enabled: true + fsGroup: 1001 + preAllocSize: 65536 + priorityClassName: "" + readinessProbe: + enabled: true + failureThreshold: 6 + initialDelaySeconds: 5 + periodSeconds: 10 + probeCommandTimeout: 2 + successThreshold: 1 + timeoutSeconds: 5 + replicaCount: 3 + resources: + limits: {} + requests: + cpu: 250m + memory: 256Mi + schedulerName: "" + service: + annotations: {} + clusterIP: "" + disableBaseClientPort: false + externalTrafficPolicy: Cluster + extraPorts: [] + headless: + annotations: {} + publishNotReadyAddresses: true + loadBalancerIP: "" + loadBalancerSourceRanges: [] + nodePorts: + client: "" + tls: "" + ports: + client: 2181 + election: 3888 + follower: 2888 + tls: 3181 + sessionAffinity: None + type: ClusterIP + serviceAccount: + annotations: {} + automountServiceAccountToken: true + create: false + name: "" + sidecars: [] + snapCount: 100000 + startupProbe: + enabled: false + failureThreshold: 15 + 
   initialDelaySeconds: 30
+    periodSeconds: 10
+    successThreshold: 1
+    timeoutSeconds: 1
+  syncLimit: 5
+  tickTime: 2000
+  tls:
+    client:
+      autoGenerated: false
+      enabled: false
+      existingSecret: ""
+      keystorePassword: ""
+      keystorePath: /opt/bitnami/zookeeper/config/certs/client/zookeeper.keystore.jks
+      passwordsSecretName: ""
+      truststorePassword: ""
+      truststorePath: /opt/bitnami/zookeeper/config/certs/client/zookeeper.truststore.jks
+    quorum:
+      autoGenerated: false
+      enabled: false
+      existingSecret: ""
+      keystorePassword: ""
+      keystorePath: /opt/bitnami/zookeeper/config/certs/quorum/zookeeper.keystore.jks
+      passwordsSecretName: ""
+      truststorePassword: ""
+      truststorePath: /opt/bitnami/zookeeper/config/certs/quorum/zookeeper.truststore.jks
+  resources:
+    limits: {}
+    requests: {}
+  tolerations: []
+  topologySpreadConstraints: {}
+  updateStrategy:
+    rollingUpdate: {}
+    type: RollingUpdate
+  volumePermissions:
+    containerSecurityContext:
+      runAsUser: 0
+    enabled: false
+    image:
+      pullPolicy: IfNotPresent
+      pullSecrets: []
+      registry: docker.io
+      repository: bitnami/bitnami-shell
+      tag: 10-debian-10-r368
+    resources:
+      limits: {}
+      requests: {}
diff --git a/examples/external-db/README.md b/examples/external-db/README.md
new file mode 100644
index 0000000..b8ad78b
--- /dev/null
+++ b/examples/external-db/README.md
@@ -0,0 +1,46 @@
+## This example creates an APITable deployment from the `app` module
+
+## Configuration
+1. Replace the placeholder kubeconfig certificate in `config-k8s/kubeconfig` with the appropriate one.
+2. Replace `namespace`, `env`, `default_storage_class_name` and the other settings according to the comments in `main.tf`.
+3. Replace all of the `regcred` information.
+
+## Usage
+To run this example you need to execute:
+```
+$ terraform init
+$ terraform plan
+$ terraform apply
+```
+Run `terraform destroy` when you don't need these resources.
+
+
+### Architecture
+```mermaid
+graph TD;
+    subgraph "APITable Infrastructure";
+        Client -->|HTTP| gateway((Gateway));
+
+        subgraph "Apitable app (k8s)";
+            gateway -->|HTTP| backend-server;
+            gateway -->|Web Socket| socket-server;
+            gateway -->|HTTP| web-server;
+            gateway -->|HTTP| room-server;
+            room-server --> backend-server;
+            gateway --> |HTTP| imageproxy-server;
+
+        end
+
+        subgraph "External db (PaaS cloud)";
+            socket-server --> |Web Socket| room-server;
+            backend-server -->|Read/Write| mysql((MySQL));
+            backend-server -->|Read/Write| redis((Redis));
+            backend-server -->|Read/Write| S3((minio));
+            room-server -->|Read/Write| mysql((MySQL));
+            room-server -->|Read/Write| redis((Redis));
+            room-server -->|Read/Write| mq((RabbitMQ));
+            imageproxy-server -->|Read| S3((minio));
+        end
+
+    end
+```
diff --git a/examples/external-db/config-k8s/kubeconfig b/examples/external-db/config-k8s/kubeconfig
new file mode 100644
index 0000000..a67a257
--- /dev/null
+++ b/examples/external-db/config-k8s/kubeconfig
@@ -0,0 +1 @@
+### Please replace the certificate content
\ No newline at end of file
diff --git a/examples/external-db/main.tf b/examples/external-db/main.tf
new file mode 100644
index 0000000..e3fd623
--- /dev/null
+++ b/examples/external-db/main.tf
@@ -0,0 +1,52 @@
+
+module "apitable-enterprise" {
+
+  source = "../../"
+  #version = "x.x.x"
+
+  namespace      = "apitable-enterprise"
+  has_datacenter = false
+  create_ns      = true
+
+  // Environmental variables, such as databases, storage, etc.
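+  // NOTE: the hosts and credentials below are illustrative placeholders; point them at
+  // your own externally managed MySQL, RabbitMQ, S3-compatible storage (MinIO) and Redis
+  // before running `terraform apply`.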
+  env = {
+    MYSQL_DATABASE        = "apitable"
+    MYSQL_HOST            = "mysql-host.example.com"
+    MYSQL_PORT            = "3306"
+    MYSQL_USERNAME        = "root"
+    MYSQL_PASSWORD        = "apitable@com"
+    DATABASE_TABLE_PREFIX = "apitable_"
+
+    RABBITMQ_HOST     = "rabbitmq-host.example.com"
+    RABBITMQ_PORT     = "5672"
+    RABBITMQ_USERNAME = "apitable"
+    RABBITMQ_PASSWORD = "apitable@com"
+    RABBITMQ_VHOST    = "/"
+
+    AWS_ACCESS_KEY    = "admin"
+    AWS_ACCESS_SECRET = "apitable@com"
+    AWS_ENDPOINT      = "http://aws-endpoint.example.com"
+    AWS_REGION        = "us-east-1"
+    AWS_HOST          = "aws-endpoint.example.com"
+    AWS_PORT          = "9000"
+
+    REDIS_HOST     = "redis-master.example.com"
+    REDIS_PASSWORD = "apitable@com"
+  }
+
+  # Replace with the default storage class of the cluster.
+  default_storage_class_name = ""
+
+  regcred = {
+    registry_server = ""
+    username        = ""
+    password        = ""
+  }
+
+  image_tags = {
+    backend_server = "latest-alpha"
+    room_server    = "latest-alpha"
+    # Others ..
+  }
+
+}
\ No newline at end of file
diff --git a/examples/external-db/versions.tf b/examples/external-db/versions.tf
new file mode 100644
index 0000000..e0040a0
--- /dev/null
+++ b/examples/external-db/versions.tf
@@ -0,0 +1,26 @@
+terraform {
+  required_version = ">= 1.0"
+
+  required_providers {
+    kubernetes = {
+      source  = "hashicorp/kubernetes"
+      version = ">= 2.16.1"
+    }
+    helm = {
+      source = "hashicorp/helm"
+      # Pinned to avoid duplicated helm resources.
+      version = "2.9.0"
+    }
+  }
+}
+
+provider "kubernetes" {
+  config_path = "${path.module}/config-k8s/kubeconfig"
+}
+
+provider "helm" {
+  kubernetes {
+    config_path = "${path.module}/config-k8s/kubeconfig"
+
+  }
+}
\ No newline at end of file
diff --git a/examples/local-db/README.md b/examples/local-db/README.md
new file mode 100644
index 0000000..62d801b
--- /dev/null
+++ b/examples/local-db/README.md
@@ -0,0 +1,48 @@
+## This example creates an APITable deployment from the built-in `datacenter` and `app` modules
+
+## Configuration
+1. Replace the placeholder kubeconfig certificate in `config-k8s/kubeconfig` with the appropriate one.
+2. Replace `namespace`, `env`, `default_storage_class_name` and the other settings according to the comments in `main.tf`.
+3. Replace all of the `regcred` information.
+
+## Usage
+To run this example you need to execute:
+```
+$ terraform init
+$ terraform plan
+$ terraform apply
+```
+Run `terraform destroy` when you don't need these resources.
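+
+After `terraform apply` finishes, one way to verify the rollout (assuming the `apitable-enterprise` and `apitable-datacenter` namespaces used in `main.tf` below) is:
+```
+$ kubectl --kubeconfig config-k8s/kubeconfig get pods -n apitable-enterprise
+$ kubectl --kubeconfig config-k8s/kubeconfig get pods -n apitable-datacenter
+```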
+
+
+### Architecture
+```mermaid
+graph TD;
+    subgraph "APITable Infrastructure";
+        Client -->|HTTP| gateway((Gateway));
+
+        subgraph "k8s"
+            subgraph "Apitable-app (namespace)";
+                gateway -->|HTTP| backend-server;
+                gateway -->|Web Socket| socket-server;
+                gateway -->|HTTP| web-server;
+                gateway -->|HTTP| room-server;
+                room-server --> backend-server;
+                gateway --> |HTTP| imageproxy-server;
+
+            end
+
+            subgraph "Apitable-db (namespace)";
+                socket-server --> |Web Socket| room-server;
+                backend-server -->|Read/Write| mysql((MySQL));
+                backend-server -->|Read/Write| redis((Redis));
+                backend-server -->|Read/Write| S3((minio));
+                room-server -->|Read/Write| mysql((MySQL));
+                room-server -->|Read/Write| redis((Redis));
+                room-server -->|Read/Write| mq((RabbitMQ));
+                imageproxy-server -->|Read| S3((minio));
+            end
+        end
+
+    end
+```
diff --git a/examples/local-db/config-k8s/kubeconfig b/examples/local-db/config-k8s/kubeconfig
new file mode 100644
index 0000000..a67a257
--- /dev/null
+++ b/examples/local-db/config-k8s/kubeconfig
@@ -0,0 +1 @@
+### Please replace the certificate content
\ No newline at end of file
diff --git a/examples/local-db/main.tf b/examples/local-db/main.tf
new file mode 100644
index 0000000..4190b25
--- /dev/null
+++ b/examples/local-db/main.tf
@@ -0,0 +1,25 @@
+
+module "apitable-enterprise" {
+
+  source = "../../"
+
+  namespace      = "apitable-enterprise"
+  data_namespace = "apitable-datacenter"
+  has_datacenter = true
+  create_ns      = true
+
+  // Environmental variables, such as databases, storage, etc.
+  env = {
+    "PARAM1" = "xx1"
+    "PARAM2" = "xx2"
+  }
+
+  default_storage_class_name = ""
+
+  regcred = {
+    registry_server = ""
+    username        = ""
+    password        = ""
+  }
+
+}
\ No newline at end of file
diff --git a/examples/local-db/versions.tf b/examples/local-db/versions.tf
new file mode 100644
index 0000000..e0040a0
--- /dev/null
+++ b/examples/local-db/versions.tf
@@ -0,0 +1,26 @@
+terraform {
+  required_version = ">= 1.0"
+
+  required_providers {
+    kubernetes = {
+      source  = "hashicorp/kubernetes"
+      version = ">= 2.16.1"
+    }
+    helm = {
+      source = "hashicorp/helm"
+      # Pinned to avoid duplicated helm resources.
+      version = "2.9.0"
+    }
+  }
+}
+
+provider "kubernetes" {
+  config_path = "${path.module}/config-k8s/kubeconfig"
+}
+
+provider "helm" {
+  kubernetes {
+    config_path = "${path.module}/config-k8s/kubeconfig"
+
+  }
+}
\ No newline at end of file
diff --git a/main.tf b/main.tf
new file mode 100644
index 0000000..3eaeceb
--- /dev/null
+++ b/main.tf
@@ -0,0 +1,201 @@
+
+locals {
+  datacenter_namespace = var.has_datacenter ? var.data_namespace : var.namespace
+  all_namespace        = [var.namespace, local.datacenter_namespace]
+  image_init_db            = "${var.regcred.registry_server}/${lookup(var.image_namespaces, "init_db", "vikadata/vika")}/init-db:${lookup(var.image_tags, "init_db", "latest")}"
+  image_init_enterprise_db = "${var.regcred.registry_server}/${lookup(var.image_namespaces, "init_db", "vikadata/vika")}/init-db:${lookup(var.image_tags, "init_db", "latest")}"
+  image_init_appdata       = "${var.regcred.registry_server}/${lookup(var.image_namespaces, "init_appdata", "vikadata/apitable-ce")}/init-appdata:${lookup(var.image_tags, "init_appdata", "latest")}"
+
+
+  env_config = merge({
+    # Basic storage configuration.
+    MYSQL_DATABASE        = "apitable"
+    MYSQL_HOST            = "mysql-primary.${local.datacenter_namespace}"
+    MYSQL_PORT            = "3306"
+    MYSQL_USERNAME        = "root"
+    MYSQL_PASSWORD        = "apitable@com"
+    DATABASE_TABLE_PREFIX = "apitable_"
+
+    RABBITMQ_HOST     = "rabbitmq-headless.${local.datacenter_namespace}"
+    RABBITMQ_PORT     = "5672"
+    RABBITMQ_USERNAME = "apitable"
+    RABBITMQ_PASSWORD = "apitable@com"
+    RABBITMQ_VHOST    = "/"
+
+    AWS_ACCESS_KEY    = "admin"
+    AWS_ACCESS_SECRET = "apitable@com"
+    AWS_ENDPOINT      = "http://minio.${local.datacenter_namespace}:9000"
+    AWS_REGION        = "us-east-1"
+    AWS_HOST          = "minio.${local.datacenter_namespace}"
+    AWS_PORT          = "9000"
+
+    REDIS_HOST     = "redis-master.${local.datacenter_namespace}"
+    REDIS_PASSWORD = "apitable@com"
+
+    # System parameter configuration.
+    SENTRY_DSN               = ""
+    SKIP_GLOBAL_WIDGET_AUDIT = "true"
+    SKIP_USAGE_VERIFICATION  = "true"
+    DOMAIN_NAME              = ""
+    EDITION                  = "apitable-saas"
+    ASSETS_URL               = "/assets"
+    ASSETS_BUCKET            = "assets"
+    OSS_HOST                 = "/assets"
+  }, var.env)
+}
+
+module "apitable-app" {
+  source = "./modules/app"
+
+  namespace = var.namespace
+  create_ns = var.create_ns
+
+  depends_on = [
+    module.apitable-datacenter
+  ]
+
+  # Image configuration
+  registry         = var.regcred.registry_server
+  image_tag        = "latest"
+  image_tags       = var.image_tags
+  image_namespace  = var.image_namespace
+  image_namespaces = var.image_namespaces
+
+  has_databus_server           = false
+  has_job_admin_server         = false
+  has_imageproxy_server        = true
+  has_auto_reloaded_config_map = true
+  has_space_job_executor       = false
+  has_bill_job_executor        = false
+  has_cron_job                 = false
+  has_sensors_filebeat         = false
+
+  # Ensure that there is docker authentication when pulling images
+  backend_server_depends_on = kubernetes_secret_v1.regcred
+
+  # service deployment configuration
+  node_selector = var.node_selector
+  resources     = var.resource
+
+  # for service envs configuration
+  env                  = local.env_config
+  envs                 = var.envs
+  public_assets_bucket = local.env_config.ASSETS_BUCKET
+
+  default_server_host = "http://web-server"
+  pricing_host        = "http://web-server"
+  minio_host          = local.env_config.AWS_ENDPOINT
+
+
+  # SLB and certs
+  has_load_balancer = var.has_load_balancer
+  enable_ssl        = var.enable_ssl
+  server_name       = var.server_name
+  tls_crt           = var.tls_crt
+  tls_key           = var.tls_key
+
+}
+
+module "apitable-datacenter" {
+  count  = var.has_datacenter ? 1 : 0
+  source = "./modules/datacenter"
+
+  namespace        = local.datacenter_namespace
+  create_ns        = var.create_ns
+  chart_repository = var.chart_repository
+
+
+  has_redis    = var.has_redis
+  has_mysql    = var.has_mysql
+  has_minio    = var.has_minio
+  has_rabbitmq = var.has_rabbitmq
+
+  default_storage_class_name = var.default_storage_class_name
+  default_storage_size       = var.default_storage_size
+
+
+  # Only effective during initial installation.
+ mysql_init_database = local.env_config.MYSQL_DATABASE + mysql_default_root_password = local.env_config.MYSQL_PASSWORD + minio_default_password = local.env_config.AWS_ACCESS_SECRET + rabbitmq_default_user = local.env_config.RABBITMQ_USERNAME + rabbitmq_default_password = local.env_config.RABBITMQ_PASSWORD + redis_default_password = local.env_config.REDIS_PASSWORD +} + +# +resource "kubernetes_secret_v1" "regcred" { + for_each = toset(local.all_namespace) + metadata { + namespace = each.key + name = "regcred" + } + + type = "kubernetes.io/dockerconfigjson" + + data = { + ".dockerconfigjson" = jsonencode({ + auths = { + (var.regcred.registry_server) = { + "username" = var.regcred.username + "password" = var.regcred.password + "auth" = base64encode("${var.regcred.username}:${var.regcred.password}") + } + } + }) + } +} + +resource "helm_release" "init_data" { + depends_on = [ + module.apitable-datacenter, + kubernetes_secret_v1.regcred + ] + name = "init-data" + chart = "${path.module}/./charts/init-data" + + + namespace = var.namespace + values = [ + < 2 ? [0] : [] + content { + name = "container.nodeSelector" + value = yamlencode(var.node_selector) + } + } + timeout = 1800 +} diff --git a/modules/app/README.md b/modules/app/README.md new file mode 100644 index 0000000..ba85155 --- /dev/null +++ b/modules/app/README.md @@ -0,0 +1,137 @@ + +## Requirements + +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 1.0 | +| [kubernetes](#requirement\_kubernetes) | >= 2.16.1 | + +## Providers + +| Name | Version | +|------|---------| +| [kubernetes](#provider\_kubernetes) | >= 2.16.1 | + +## Modules + +No modules. + +## Resources + +| Name | Type | +|------|------| +| [kubernetes_config_map.backend_server_env](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/config_map) | resource | +| [kubernetes_config_map.databus_server_env](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/config_map) | resource | +| [kubernetes_config_map.filebeat_config_in](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/config_map) | resource | +| [kubernetes_config_map.fusion_server_env](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/config_map) | resource | +| [kubernetes_config_map.imageproxy_server_env](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/config_map) | resource | +| [kubernetes_config_map.nest_rest_server_env](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/config_map) | resource | +| [kubernetes_config_map.openresty_config](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/config_map) | resource | +| [kubernetes_config_map.room_server_env](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/config_map) | resource | +| [kubernetes_config_map.scheduler_server_env](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/config_map) | resource | +| [kubernetes_config_map.socket_server_env](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/config_map) | resource | +| [kubernetes_config_map.web_server_env](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/config_map) | resource | +| [kubernetes_deployment.backend_server](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/deployment) | resource 
| +| [kubernetes_deployment.databus_server](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/deployment) | resource | +| [kubernetes_deployment.fusion_server](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/deployment) | resource | +| [kubernetes_deployment.imageproxy_server](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/deployment) | resource | +| [kubernetes_deployment.job_admin_server](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/deployment) | resource | +| [kubernetes_deployment.nest_rest_server](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/deployment) | resource | +| [kubernetes_deployment.openresty](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/deployment) | resource | +| [kubernetes_deployment.room_server](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/deployment) | resource | +| [kubernetes_deployment.scheduler_server](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/deployment) | resource | +| [kubernetes_deployment.socket_server](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/deployment) | resource | +| [kubernetes_deployment.web_server](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/deployment) | resource | +| [kubernetes_horizontal_pod_autoscaler.databus_server_autoscaler](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/horizontal_pod_autoscaler) | resource | +| [kubernetes_horizontal_pod_autoscaler.fusion_server_autoscaler](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/horizontal_pod_autoscaler) | resource | +| [kubernetes_horizontal_pod_autoscaler.nest_rest_server_autoscaler](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/horizontal_pod_autoscaler) | resource | +| [kubernetes_horizontal_pod_autoscaler.room_server_autoscaler](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/horizontal_pod_autoscaler) | resource | +| [kubernetes_namespace.this](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/namespace) | resource | +| [kubernetes_secret.openresty-extend-ssl-certs](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/secret) | resource | +| [kubernetes_secret.openresty-ssl-certs](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/secret) | resource | +| [kubernetes_service.backend_server](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/service) | resource | +| [kubernetes_service.databus_server](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/service) | resource | +| [kubernetes_service.fusion_server](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/service) | resource | +| [kubernetes_service.imageproxy_server](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/service) | resource | +| [kubernetes_service.job_admin_server](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/service) | resource | +| [kubernetes_service.nest_rest_server](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/service) 
| resource | +| [kubernetes_service.openresty_server](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/service) | resource | +| [kubernetes_service.room_server](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/service) | resource | +| [kubernetes_service.scheduler_server](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/service) | resource | +| [kubernetes_service.socket_server](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/service) | resource | +| [kubernetes_service.web_server](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/service) | resource | +| [kubernetes_namespace.this](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/data-sources/namespace) | data source | + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| [ai\_server\_sc](#input\_ai\_server\_sc) | ai\_server storage class | `map` |
{
"size": "10Pi",
"volume_attributes": {
"subPath": "ai_server"
}
}
| no | +| [backend\_server\_depends\_on](#input\_backend\_server\_depends\_on) | n/a | `string` | `""` | no | +| [create\_ns](#input\_create\_ns) | Whether to automatically create namespace | `bool` | `true` | no | +| [default\_server\_host](#input\_default\_server\_host) | Default route processing service | `string` | `"http://web-server"` | no | +| [default\_server\_host\_override\_proxy\_host](#input\_default\_server\_host\_override\_proxy\_host) | n/a | `string` | `""` | no | +| [developers\_redirect\_url](#input\_developers\_redirect\_url) | n/a | `string` | `""` | no | +| [disallow\_robots](#input\_disallow\_robots) | Whether to disable crawlers | `bool` | `true` | no | +| [docker\_edition](#input\_docker\_edition) | # Deprecate | `string` | `"vika"` | no | +| [enable\_ssl](#input\_enable\_ssl) | Whether to enable ssl | `bool` | `false` | no | +| [env](#input\_env) | environment variable | `map(any)` | `{}` | no | +| [envs](#input\_envs) | Environment variables, submodules replace .env | `map` | `{}` | no | +| [extend\_tls\_data](#input\_extend\_tls\_data) | Extended certificate crt and key contents | `map(any)` |
{
"tls_crt": "",
"tls_domain": "",
"tls_key": ""
}
| no | +| [has\_ai\_server](#input\_has\_ai\_server) | Whether to deploy AI server? | `bool` | `false` | no | +| [has\_auto\_reloaded\_config\_map](#input\_has\_auto\_reloaded\_config\_map) | Modify the configMap whether to automatically restart pods? | `bool` | `false` | no | +| [has\_backend\_server](#input\_has\_backend\_server) | Whether to deploy Java-Api service? | `bool` | `true` | no | +| [has\_bill\_job\_executor](#input\_has\_bill\_job\_executor) | Whether to deploy XXL-JOB-subscription task executor service? | `bool` | `false` | no | +| [has\_cron\_job](#input\_has\_cron\_job) | Whether it has a timed task job? | `bool` | `true` | no | +| [has\_databus\_server](#input\_has\_databus\_server) | Deploy the databus-server? | `bool` | `true` | no | +| [has\_dingtalk\_server](#input\_has\_dingtalk\_server) | Whether to deploy DingTalk application integration service? | `bool` | `false` | no | +| [has\_document\_server](#input\_has\_document\_server) | Whether to deploy the Node-Nest.js-Document-Server service? | `bool` | `false` | no | +| [has\_extend\_tls](#input\_has\_extend\_tls) | Whether to support extended certificate | `bool` | `false` | no | +| [has\_fusion\_server](#input\_has\_fusion\_server) | Whether to deploy the Node-Nest.js-Fusion-Api-Server service? | `bool` | `true` | no | +| [has\_imageproxy\_server](#input\_has\_imageproxy\_server) | Whether to deploy the Go image clipping service? | `bool` | `false` | no | +| [has\_job\_admin\_server](#input\_has\_job\_admin\_server) | Whether to deploy XXL-JOB-Admin service? | `bool` | `false` | no | +| [has\_load\_balancer](#input\_has\_load\_balancer) | Does it come with Load Balancer? OpenResty exposes IP if any | `bool` | `false` | no | +| [has\_migration\_server](#input\_has\_migration\_server) | Whether to deploy Java-Data Migration Service? | `bool` | `false` | no | +| [has\_nest\_rest\_server](#input\_has\_nest\_rest\_server) | /dataPack API only, would be removed after publishing Galaxy version | `bool` | `false` | no | +| [has\_openresty](#input\_has\_openresty) | Does it come with an openresty gateway? With public IP and load balancing | `bool` | `true` | no | +| [has\_openresty\_ofelia\_job](#input\_has\_openresty\_ofelia\_job) | whether to bring a lightweight OfeliaJob Container? | `bool` | `false` | no | +| [has\_room\_server](#input\_has\_room\_server) | Whether to deploy the Node-Nest.js-Room-Server service? | `bool` | `true` | no | +| [has\_scheduler\_server](#input\_has\_scheduler\_server) | Whether to deploy the Node-Nest.js-Scheduler-Server service? | `bool` | `true` | no | +| [has\_sensors\_filebeat](#input\_has\_sensors\_filebeat) | Whether to enable Sensors data collection | `bool` | `true` | no | +| [has\_socket\_server](#input\_has\_socket\_server) | Whether to deploy the Node-Nest.js-Socket-Server service? | `bool` | `true` | no | +| [has\_space\_job\_executor](#input\_has\_space\_job\_executor) | Whether to deploy XXL-JO-workbench task executor service? | `bool` | `false` | no | +| [has\_web\_server](#input\_has\_web\_server) | Whether to deploy Web-Server (front-end template) service? | `bool` | `true` | no | +| [image\_namespace](#input\_image\_namespace) | What namespace container image to use when initializing | `string` | `"vikadata/vika"` | no | +| [image\_namespaces](#input\_image\_namespaces) | During initialization, you can freely control different container services, which namespaces are used respectively, and if any, overwrite image\_namespace. 
It is recommended that convention is better than configuration, and corresponding branches should be made in each project | `map(any)` | `{}` | no | +| [image\_pull\_policy](#input\_image\_pull\_policy) | n/a | `string` | `"IfNotPresent"` | no | +| [image\_tag](#input\_image\_tag) | What version of the container image tag to use when initializing | `string` | `"latest-alpha"` | no | +| [image\_tags](#input\_image\_tags) | During initialization, you can freely control different container services, which tags are used respectively, and if any, overwrite image\_tag. It is recommended that convention is better than configuration. Make corresponding branches in each project, and use the last image\_tag variable for global unification instead of configuring here. In addition, it should be noted that the variables here are all underscored, such as the container service backend-server, the variables here correspond to backend\_server, and match the terraform variable naming practice | `map(any)` | `{}` | no | +| [is\_wait](#input\_is\_wait) | n/a | `bool` | `true` | no | +| [job\_admin\_server\_host](#input\_job\_admin\_server\_host) | n/a | `string` | `"job-admin-server"` | no | +| [lbs\_amap\_secret](#input\_lbs\_amap\_secret) | Gaode map reverse proxy security key | `string` | `""` | no | +| [minio\_host](#input\_minio\_host) | Object storage service address | `string` | `"http://minio.apitable-datacenter:9090"` | no | +| [namespace](#input\_namespace) | Shared namespace for applications | `string` | `"apitable-app"` | no | +| [node\_selector](#input\_node\_selector) | Node node label selector | `map` | `{}` | no | +| [openresty\_annotations](#input\_openresty\_annotations) | openresty annotation, used to control load balancing specifications, slb.s1.small(5k), slb.s3.small(20w) / slb.s3.large(100w) | `map(any)` |
{
"service.beta.kubernetes.io/alibaba-cloud-loadbalancer-spec": "slb.s1.small"
}
| no | +| [openresty\_extra\_config](#input\_openresty\_extra\_config) | nginx (openresty) external configuration file, which belongs to http internal level | `string` | `""` | no | +| [openresty\_index\_block](#input\_openresty\_index\_block) | Homepage URI =/, support nginx, lua code block | `string` | `""` | no | +| [openresty\_server\_block](#input\_openresty\_server\_block) | nginx (openresty) external configuration file, which belongs to the internal configuration of service | `string` | `""` | no | +| [pricing\_host](#input\_pricing\_host) | pricing server | `string` | `"http://pricing.apitable-mkt"` | no | +| [public\_assets\_bucket](#input\_public\_assets\_bucket) | n/a | `string` | `"vk-assets-ltd"` | no | +| [publish\_databus\_server](#input\_publish\_databus\_server) | Publish the databus-server ? | `bool` | `true` | no | +| [pv\_csi](#input\_pv\_csi) | csi storage namespace | `map` |
{
"driver": "csi.juicefs.com",
"fs_type": "juicefs",
"namespace": "vika-opsbase",
"node_publish_secret_ref": "juicefs-sc-secret"
}
| no | +| [registry](#input\_registry) | The dockerHub, the default is ghcr.io of github, the Vika accelerator is ghcr.vikadata.com, and the private warehouse is docker.vika.ltd | `string` | `"ghcr.io"` | no | +| [resources](#input\_resources) | How many resources are used for different services? Including copy, CPU, and memory, the unit is MB. limit is the modified value × 2, and each environment has the default value of the minimum unit to start | `any` | `{}` | no | +| [server\_name](#input\_server\_name) | default domain name | `string` | `"vika.ltd"` | no | +| [tls\_crt](#input\_tls\_crt) | tls cert body | `string` | `""` | no | +| [tls\_key](#input\_tls\_key) | tls key body | `string` | `""` | no | +| [tls\_name](#input\_tls\_name) | tls cert name | `string` | `""` | no | +| [worker\_processes](#input\_worker\_processes) | nginx(openresty) worker\_processes CPU core number, the corresponding CPU core number in the old version k8s | `string` | `"auto"` | no | + +## Outputs + +| Name | Description | +|------|-------------| +| [ingress\_ip](#output\_ingress\_ip) | Output ingress ip | +| [ingress\_ip\_alternative](#output\_ingress\_ip\_alternative) | n/a | + \ No newline at end of file diff --git a/modules/app/backend_server.tf b/modules/app/backend_server.tf new file mode 100644 index 0000000..26054d9 --- /dev/null +++ b/modules/app/backend_server.tf @@ -0,0 +1,314 @@ +locals { + default_backend_server_resources = { + replicas = "1" + requests_cpu = "100m" + requests_memory = "1024Mi" + limits_cpu = "2000m" + limits_memory = "4096Mi" + rolling_update_max_unavailable = "25%" + rolling_update_max_surge = "25%" + } +} + +locals { + backend_server_resources = merge(local.default_backend_server_resources, lookup(var.resources, "backend_server", {})) +} + +variable "backend_server_depends_on" { + //type = string + default = "" +} +resource "kubernetes_deployment" "backend_server" { + wait_for_rollout = var.is_wait + depends_on = [ + kubernetes_namespace.this, + var.backend_server_depends_on + ] + + count = var.has_backend_server ? 1 : 0 + metadata { + name = "backend-server" + namespace = var.namespace + + labels = { + app = "backend-server" + } + + } + + spec { + replicas = local.backend_server_resources["replicas"] + + selector { + match_labels = { + app = "backend-server" + } + } + + template { + metadata { + labels = { + app = "backend-server" + # sa-app = "backend-server-filebeat" + } + + annotations = { + # redeploy-timestamp = "1655781881366" + "configmap.backend-server-env/reload" = var.has_auto_reloaded_config_map ? sha1(jsonencode(kubernetes_config_map.backend_server_env.data)) : "not_enabled" + } + } + + spec { + // 神策日志目录存放于 empty_dir {} + volume { + name = "log-path" + empty_dir {} + } + + //Configuration of filebeat. + volume { + name = "filebeat-config-in" + + config_map { + name = "filebeat-config-in" + } + } + + node_selector = var.node_selector + + //Initialize the container and create the structure of the My SQL database. 
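+ // Note: Kubernetes runs init containers to completion, in order, before the backend-server
+ // container starts, so the two migration steps below (core and enterprise schema, both ACTION=update)
+ // are finished before the API begins serving traffic.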
+ init_container { + name = "init-db" + image = "${var.registry}/${lookup(local.image_namespaces, "init_db")}/init-db:${lookup(local.image_tags, "init_db")}" + image_pull_policy = var.image_pull_policy + # command = ["sh", "-c", "chmod -R 777 /workdir && echo InitContainerFinished!"] + env { + name = "DB_HOST" + value = lookup(local.env_config, "MYSQL_HOST", "localhost") + } + env { + name = "DB_PORT" + value = lookup(local.env_config, "MYSQL_PORT", "3306") + } + env { + name = "DB_NAME" + value = lookup(local.env_config, "MYSQL_DATABASE", "") + } + env { + name = "DB_USERNAME" + value = lookup(local.env_config, "MYSQL_USERNAME", "root") + } + env { + name = "DB_PASSWORD" + value = sensitive(lookup(local.env_config, "MYSQL_PASSWORD", "")) + } + env { + name = "DATABASE_TABLE_PREFIX" + value = local.env_config["DATABASE_TABLE_PREFIX"] + } + env { + name = "ACTION" + value = "update" + } + + } + init_container { + name = "init-db-enterprise" + image = "${var.registry}/${lookup(local.image_namespaces, "init_db")}/init-db-enterprise:${lookup(local.image_tags, "init_db")}" + image_pull_policy = var.image_pull_policy + # command = ["sh", "-c", "chmod -R 777 /workdir && echo InitContainerFinished!"] + env { + name = "EDITION" + value = lookup(local.backend_server_env, "EDITION", "vika-saas") + } + env { + name = "DB_HOST" + value = lookup(local.env_config, "MYSQL_HOST", "localhost") + } + env { + name = "DB_PORT" + value = lookup(local.env_config, "MYSQL_PORT", "3306") + } + env { + name = "DB_NAME" + value = lookup(local.env_config, "MYSQL_DATABASE", "") + } + env { + name = "DB_USERNAME" + value = lookup(local.env_config, "MYSQL_USERNAME", "root") + } + env { + name = "DB_PASSWORD" + value = sensitive(lookup(local.env_config, "MYSQL_PASSWORD", "")) + } + env { + name = "DATABASE_TABLE_PREFIX" + value = local.env_config["DATABASE_TABLE_PREFIX"] + } + env { + name = "ACTION" + value = "update" + } + + } + + container { + name = "backend-server" + image = "${var.registry}/${lookup(local.image_namespaces, "backend_server")}/backend-server:${lookup(local.image_tags, "backend_server")}" + image_pull_policy = var.image_pull_policy + env_from { + config_map_ref { + name = "backend-server-env" + } + } + + + resources { + requests = { + cpu = local.backend_server_resources["requests_cpu"] + memory = local.backend_server_resources["requests_memory"] //@add_tf_local + } + limits = { + cpu = local.backend_server_resources["limits_cpu"] //@add_tf_local + memory = local.backend_server_resources["limits_memory"] //@add_tf_local + } + } + + liveness_probe { + http_get { + path = "/api/v1/actuator/health/liveness" + port = "8081" + scheme = "HTTP" + } + + initial_delay_seconds = 60 + timeout_seconds = 3 + period_seconds = 30 + success_threshold = 1 + failure_threshold = 3 + } + + readiness_probe { + http_get { + path = "/api/v1/actuator/health/readiness" + port = "8081" + scheme = "HTTP" + } + + initial_delay_seconds = 60 + timeout_seconds = 3 + period_seconds = 10 + success_threshold = 1 + failure_threshold = 3 + } + + startup_probe { + http_get { + path = "/api/v1/actuator/health/readiness" + port = "8081" + scheme = "HTTP" + } + + initial_delay_seconds = 60 + timeout_seconds = 3 + period_seconds = 10 + success_threshold = 1 + failure_threshold = 3 + } + + #Sensors data log storage location. 
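+ # Note: "log-path" is the shared emptyDir declared above; the optional filebeat sidecar
+ # (enabled via has_sensors_filebeat) mounts the same volume read-only and ships these logs to Logstash.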
+ volume_mount { + name = "log-path" + mount_path = "/logs/sensors" + } + + termination_message_path = "/dev/termination-log" + termination_message_policy = "File" + } + + //Filebeat sidecar, collecting Sensors data logs + dynamic "container" { + for_each = var.has_sensors_filebeat ? [1] : [] + content { + name = "filebeat" + image = "${var.registry}/vikadata/beats/filebeat:7.2.0" + args = ["-c", "/etc/filebeat.yml", "-e"] + + volume_mount { + name = "filebeat-config-in" + read_only = true + mount_path = "/etc/filebeat.yml" + sub_path = "filebeat.yml" + } + + volume_mount { + name = "log-path" + read_only = true + mount_path = "/logs/sensors" + } + } + } + image_pull_secrets { + name = "regcred" + } + restart_policy = "Always" + termination_grace_period_seconds = 30 + dns_policy = "ClusterFirst" + } + } + + strategy { + type = "RollingUpdate" + + rolling_update { + max_unavailable = local.backend_server_resources["rolling_update_max_unavailable"] + max_surge = local.backend_server_resources["rolling_update_max_surge"] + } + } + + revision_history_limit = 10 + progress_deadline_seconds = 600 + } + + + //Whether to ignore the change of image tag. + lifecycle { + ignore_changes = [ + spec[0].template[0].spec[0].affinity, + ] + } +} + +resource "kubernetes_service" "backend_server" { + count = var.has_backend_server ? 1 : 0 + depends_on = [ + kubernetes_namespace.this + ] + metadata { + name = "backend-server" + namespace = var.namespace + } + + spec { + port { + name = "backend-server-8081-8081" + protocol = "TCP" + port = 8081 + target_port = "8081" + } + port { + name = "backend-server-8083-8083" + protocol = "TCP" + port = 8083 + target_port = "8083" + } + + selector = { + app = "backend-server" + } + + type = "ClusterIP" + session_affinity = "None" + ip_families = ["IPv4"] + } +} diff --git a/modules/app/backend_server_env.tf b/modules/app/backend_server_env.tf new file mode 100644 index 0000000..18bb4f7 --- /dev/null +++ b/modules/app/backend_server_env.tf @@ -0,0 +1,72 @@ +locals { + backend_server_env = merge(local.env_config, { + LOGGER_MAX_HISTORY_DAYS = 1 + + CORS_ORIGINS = "*" + TEMPLATE_SPACE = "spcNTxlv8Drra" + + SOCKET_DOMAIN = "http://socket-server:3001/socket" + OPEN_REDIRECT_URI = "https://integration.vika.ltd/api/v1/wechat/open/getQueryAuth" + FEISHU_APP_ENABLE = "true" + + SPRINGFOX_ENABLED = "true" + SWAGGER_ENABLED = "true" + DECORATOR_ENABLED = "true" + ZIPKIN_ENABLED = "false" + + + # GRPC + NEST_GRPC_ADDRESS = "static://room-server:3334" + BACKEND_GRPC_PORT = "8083" + + + + # socketio starter + SOCKET_URL = "http://socket-server:3002" + SOCKET_RECONNECTION_ATTEMPTS = "10" + SOCKET_RECONNECTION_DELAY = "500" + SOCKET_TIMEOUT = "5000" + + # oss starter + OSS_ENABLED = "true" + OSS_TYPE = "aws" + + + + + + + # IDaaS starter + IDAAS_ENABLED = "false" + + # Enterprise Env + SESSION_NAMESPACE = "vikadata:session" + DEFAULT_LOCALE = "zh-CN" + EMAIL_PERSONAL = "维格表" + + # user cooling-off time, unit: day + COOLING_OFF_PERIOD = 30 + + # scheduler + # close user job cron string, run at 0:00 every day + CLOSE_PAUSED_USER_CRON = "0 0 0 * * ?" 
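+ # Note: the value above appears to be a Quartz/Spring-style six-field cron expression
+ # (second minute hour day-of-month month day-of-week), i.e. fire once a day at 00:00:00.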
+ # disable heartbeat job cron + HEARTBEAT_CRON = "-" + + }, lookup(var.envs, "backend_server", {})) +} + +resource "kubernetes_config_map" "backend_server_env" { + depends_on = [ + kubernetes_namespace.this + ] + metadata { + name = "backend-server-env" + namespace = var.namespace + + annotations = { + } + } + + data = local.backend_server_env +} diff --git a/modules/app/config_image_tags.tf b/modules/app/config_image_tags.tf new file mode 100644 index 0000000..36fec41 --- /dev/null +++ b/modules/app/config_image_tags.tf @@ -0,0 +1,34 @@ +//Image settings for all server +locals { + image_tags = merge({ + backend_server = var.image_tag + room_server = var.image_tag + web_server = var.image_tag + socket_server = var.image_tag + migration_server = var.image_tag + init_db = var.image_tag + space_job_executor = var.image_tag + imageproxy_server = var.image_tag + init_settings = var.image_tag + dingtalk_server = var.image_tag + ai_server = var.image_tag + databus_server = var.image_tag + }, var.image_tags) + + image_namespaces = merge({ + backend_server = var.image_namespace + room_server = var.image_namespace + web_server = var.image_namespace + socket_server = var.image_namespace + migration_server = var.image_namespace + init_db = var.image_namespace + space_job_executor = var.image_namespace + imageproxy_server = var.image_namespace + init_settings = var.image_namespace + dingtalk_server = var.image_namespace + job_admin_server = var.image_namespace + openresty = var.image_namespace + ai_server = var.image_namespace + databus_server = var.image_namespace + }, var.image_namespaces) +} diff --git a/modules/app/config_map_env.tf b/modules/app/config_map_env.tf new file mode 100644 index 0000000..327a2be --- /dev/null +++ b/modules/app/config_map_env.tf @@ -0,0 +1,42 @@ +locals { + //This is the global .env, it will be overwritten by the .env of other sub-services + env_config = merge({ + VIKA_DATA_PATH = "/data/${var.namespace}" + ENV = var.namespace + NODE_ENV = "production" + + # MySQL + MYSQL_HOST = "mysql-server" + MYSQL_PORT = "3306" + MYSQL_DATABASE = "apitable" + MYSQL_USERNAME = "apitable" + MYSQL_PASSWORD = "nby9WQX.zuf_dvp@vhw" + DATABASE_TABLE_PREFIX = "apitable_" + + # Redis + REDIS_HOST = "redis-master.apitable-datacenter" + REDIS_PORT = "6379" + REDIS_PASSWORD = "UHWCWiuUMVyupqmW4cXV" + REDIS_DB = "0" + + # RabbitMQ + RABBITMQ_HOST = "rabbitmq-headless.apitable-datacenter" + RABBITMQ_PORT = "5672" + RABBITMQ_USERNAME = "user" + RABBITMQ_PASSWORD = "7r4HVvsrwP4kQjAgj8Jj" + RABBITMQ_VHOST = "/" + + # backend + nest + SERVER_DOMAIN = "" + + ASSETS_URL = "assets" + ASSETS_BUCKET = "assets" + EDITION = "apitable-saas" + + CUSTOMER_NAME = null + STORAGE_CLASS_NAME = "" + + DATABUS_SERVER_BASE_URL = "http://databus-server:8625" + + }, var.env) +} diff --git a/modules/app/config_map_filebeat.tf b/modules/app/config_map_filebeat.tf new file mode 100644 index 0000000..909f442 --- /dev/null +++ b/modules/app/config_map_filebeat.tf @@ -0,0 +1,28 @@ +resource "kubernetes_config_map" "filebeat_config_in" { + depends_on = [ + kubernetes_namespace.this + ] + metadata { + name = "filebeat-config-in" + namespace = var.namespace + + labels = { + sa-app = "filebeat" + feature = "sensors" + } + } + + data = { + "filebeat.yml" = <<-EOT + filebeat.inputs: + - type: log + #Read files starting with service_log in the /var/log/containers directory. + paths: + - /logs/sensors/backend-server*/service_log.* + + output.logstash: + #Cluster Intranet Logstash. 
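+ # Note: this default points at Logstash in the vika-datacenter namespace; adjust the host
+ # below if your Logstash (or the rest of the datacenter stack, which defaults to apitable-datacenter) runs elsewhere.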
+ hosts: ["logstash.vika-datacenter.svc.cluster.local:5044"] + EOT + } +} diff --git a/modules/app/databus_server.tf b/modules/app/databus_server.tf new file mode 100644 index 0000000..bb99e32 --- /dev/null +++ b/modules/app/databus_server.tf @@ -0,0 +1,210 @@ +locals { + default_databus_server_resources = { + replicas = "1" + max_replicas = "10" + requests_cpu = "100m" + requests_memory = "512Mi" + limits_cpu = "1500m" + limits_memory = "8192Mi" + rolling_update_max_unavailable = "25%" + rolling_update_max_surge = "25%" + cpu_utilization_percentage = "500" + } +} + +locals { + databus_server_resources = merge(local.default_databus_server_resources, lookup(var.resources, "databus_server", {})) +} + +resource "kubernetes_deployment" "databus_server" { + wait_for_rollout = var.is_wait + count = var.has_databus_server ? 1 : 0 + + depends_on = [ + kubernetes_deployment.backend_server, + kubernetes_namespace.this + ] + + metadata { + name = "databus-server" + namespace = var.namespace + + labels = { + app = "databus-server" + } + + # annotations = { + # # "deployment.kubernetes.io/revision" = "23" + # "configmap.databus-server-env/reload" = var.has_auto_reloaded_config_map ? sha1(jsonencode(kubernetes_config_map.databus_server_env.data)) : "not_enabled" + # } + } + + spec { + replicas = local.databus_server_resources["replicas"] + + selector { + match_labels = { + app = "databus-server" + } + } + + template { + metadata { + labels = { + app = "databus-server" + } + + annotations = { + # redeploy-timestamp = "1655781881366" + "configmap.databus-server-env/reload" = var.has_auto_reloaded_config_map ? sha1(jsonencode(kubernetes_config_map.databus_server_env.data)) : "not_enabled" + } + } + + spec { + + node_selector = var.node_selector + + container { + name = "databus-server" + image = "${var.registry}/${lookup(local.image_namespaces, "databus_server")}/databus-server:${lookup(local.image_tags, "databus_server")}" + + env_from { + config_map_ref { + name = "databus-server-env" + } + } + + resources { + requests = { + cpu = local.databus_server_resources["requests_cpu"] + memory = local.databus_server_resources["requests_memory"] //@add_tf_local + } + limits = { + cpu = local.databus_server_resources["limits_cpu"] //@add_tf_local + memory = local.databus_server_resources["limits_memory"] //@add_tf_local + } + } + + liveness_probe { + http_get { + path = "/databus" + port = "8625" + scheme = "HTTP" + } + + initial_delay_seconds = 10 + timeout_seconds = 3 + period_seconds = 45 + success_threshold = 1 + failure_threshold = 3 + } + + readiness_probe { + http_get { + path = "/databus" + port = "8625" + scheme = "HTTP" + } + + initial_delay_seconds = 10 + timeout_seconds = 3 + period_seconds = 15 + success_threshold = 1 + failure_threshold = 3 + } + + startup_probe { + http_get { + path = "/databus" + port = "8625" + scheme = "HTTP" + } + + initial_delay_seconds = 10 + timeout_seconds = 1 + period_seconds = 10 + success_threshold = 1 + failure_threshold = 3 + } + + + termination_message_path = "/dev/termination-log" + termination_message_policy = "File" + image_pull_policy = var.image_pull_policy + } + + image_pull_secrets { + name = "regcred" + } + restart_policy = "Always" + termination_grace_period_seconds = 30 + dns_policy = "ClusterFirst" + } + } + + strategy { + type = "RollingUpdate" + + rolling_update { + max_unavailable = local.databus_server_resources["rolling_update_max_unavailable"] + max_surge = local.databus_server_resources["rolling_update_max_surge"] + } + } + + revision_history_limit = 
10 + progress_deadline_seconds = 600 + } + + //Whether to ignore the change of image tag. + lifecycle { + ignore_changes = [ + spec[0].template[0].spec[0].affinity, + ] + } +} + +resource "kubernetes_service" "databus_server" { + count = var.has_databus_server ? 1 : 0 + metadata { + name = "databus-server" + namespace = var.namespace + } + + spec { + port { + name = "databus-server-8625-8625" + protocol = "TCP" + port = 8625 + target_port = "8625" + } + + + selector = { + app = "databus-server" + } + + type = "ClusterIP" + session_affinity = "None" + ip_families = ["IPv4"] + } +} + +resource "kubernetes_horizontal_pod_autoscaler" "databus_server_autoscaler" { + count = var.has_databus_server ? 1 : 0 + metadata { + name = "databus-server-autoscaler-v2beta2" + namespace = var.namespace + } + + spec { + min_replicas = local.databus_server_resources["replicas"] + max_replicas = local.databus_server_resources["max_replicas"] + + scale_target_ref { + api_version = "apps/v1" + kind = "Deployment" + name = "databus-server" + } + target_cpu_utilization_percentage = local.databus_server_resources["cpu_utilization_percentage"] + } +} diff --git a/modules/app/databus_server_env.tf b/modules/app/databus_server_env.tf new file mode 100644 index 0000000..494542d --- /dev/null +++ b/modules/app/databus_server_env.tf @@ -0,0 +1,22 @@ +locals { + databus_server_env = merge(local.env_config, { + WEB_SOCKET_CHANNEL_ENV = var.namespace + BACKEND_BASE_URL = "http://backend-server:8081/api/v1/" + OSS_HOST = "/assets" + }, lookup(var.envs, "databus_server", {})) +} + +resource "kubernetes_config_map" "databus_server_env" { + depends_on = [ + kubernetes_namespace.this + ] + metadata { + name = "databus-server-env" + namespace = var.namespace + + annotations = { + } + } + + data = local.databus_server_env +} diff --git a/modules/app/fusion_server.tf b/modules/app/fusion_server.tf new file mode 100644 index 0000000..a0a00d1 --- /dev/null +++ b/modules/app/fusion_server.tf @@ -0,0 +1,222 @@ +locals { + default_fusion_server_resources = { + replicas = "1" + max_replicas = "50" + requests_cpu = "100m" + requests_memory = "512Mi" + limits_cpu = "1500m" + limits_memory = "8192Mi" + rolling_update_max_unavailable = "25%" + rolling_update_max_surge = "25%" + cpu_utilization_percentage = "500" + } +} + +locals { + fusion_server_resources = merge(local.default_fusion_server_resources, lookup(var.resources, "fusion_server", {})) +} + +resource "kubernetes_deployment" "fusion_server" { + count = var.has_fusion_server ? 1 : 0 + + depends_on = [ + kubernetes_deployment.backend_server, + kubernetes_namespace.this + ] + + metadata { + name = "fusion-server" + namespace = var.namespace + + labels = { + app = "fusion-server" + } + + } + + spec { + replicas = local.fusion_server_resources["replicas"] + + selector { + match_labels = { + app = "fusion-server" + } + } + + template { + metadata { + labels = { + app = "fusion-server" + } + + annotations = { + # redeploy-timestamp = "1655781881366" + "configmap.fusion-server-env/reload" = var.has_auto_reloaded_config_map ? 
sha1(jsonencode(kubernetes_config_map.fusion_server_env.data)) : "not_enabled" + } + } + + spec { + + node_selector = var.node_selector + + container { + name = "fusion-server" + image = "${var.registry}/${lookup(local.image_namespaces, "room_server")}/room-server:${lookup(local.image_tags, "room_server")}" + + env_from { + config_map_ref { + name = "fusion-server-env" + } + } + + + resources { + requests = { + cpu = local.fusion_server_resources["requests_cpu"] + memory = local.fusion_server_resources["requests_memory"] //@add_tf_local + } + + limits = { + cpu = local.fusion_server_resources["limits_cpu"] //@add_tf_local + memory = local.fusion_server_resources["limits_memory"] //@add_tf_local + } + } + + liveness_probe { + http_get { + path = "/actuator/health" + port = "3333" + scheme = "HTTP" + } + + initial_delay_seconds = 10 + timeout_seconds = 3 + period_seconds = 30 + success_threshold = 1 + failure_threshold = 3 + } + + readiness_probe { + http_get { + path = "/actuator/health" + port = "3333" + scheme = "HTTP" + } + + initial_delay_seconds = 10 + timeout_seconds = 3 + period_seconds = 15 + success_threshold = 1 + failure_threshold = 3 + } + + startup_probe { + http_get { + path = "/actuator/health" + port = "3333" + scheme = "HTTP" + } + + initial_delay_seconds = 10 + timeout_seconds = 1 + period_seconds = 10 + success_threshold = 1 + failure_threshold = 3 + } + + volume_mount { + name = "fusion-server-1658224800000" + mount_path = "/home/vikadata/packages/room-server/logs" + } + + volume_mount { + name = "fusion-server-1658224800000" + mount_path = "/app/packages/room-server/logs" + } + + termination_message_path = "/dev/termination-log" + termination_message_policy = "File" + image_pull_policy = var.image_pull_policy + } + + volume { + name = "fusion-server-1658224800000" + empty_dir {} + } + + image_pull_secrets { + name = "regcred" + } + restart_policy = "Always" + termination_grace_period_seconds = 30 + dns_policy = "ClusterFirst" + } + } + + strategy { + type = "RollingUpdate" + + rolling_update { + max_unavailable = local.fusion_server_resources["rolling_update_max_unavailable"] + max_surge = local.fusion_server_resources["rolling_update_max_surge"] + } + } + + revision_history_limit = 10 + progress_deadline_seconds = 600 + } + + //Whether to ignore the change of image tag. + lifecycle { + ignore_changes = [ + spec[0].template[0].spec[0].affinity, + ] + } +} + +resource "kubernetes_service" "fusion_server" { + count = var.has_fusion_server ? 1 : 0 + metadata { + name = "fusion-server" + namespace = var.namespace + } + + spec { + port { + name = "fusion-server-3333-3333" + protocol = "TCP" + port = 3333 + target_port = "3333" + } + + selector = { + app = "fusion-server" + } + + type = "ClusterIP" + session_affinity = "None" + ip_families = ["IPv4"] + } +} + + +resource "kubernetes_horizontal_pod_autoscaler" "fusion_server_autoscaler" { + count = var.has_fusion_server ? 
1 : 0 + metadata { + name = "fusion-server-autoscaler-v2beta2" + namespace = var.namespace + } + + spec { + min_replicas = local.fusion_server_resources["replicas"] + max_replicas = local.fusion_server_resources["max_replicas"] + + scale_target_ref { + api_version = "apps/v1" + kind = "Deployment" + name = "fusion-server" + } + target_cpu_utilization_percentage = local.fusion_server_resources["cpu_utilization_percentage"] + + } +} diff --git a/modules/app/fusion_server_env.tf b/modules/app/fusion_server_env.tf new file mode 100644 index 0000000..fc866c3 --- /dev/null +++ b/modules/app/fusion_server_env.tf @@ -0,0 +1,29 @@ +locals { + fusion_server_env = merge(local.env_config, { + APPLICATION_NAME = "FUSION_SERVER" + INSTANCE_COUNT = "1" + LOG_LEVEL = "info" + BACKEND_BASE_URL = "http://backend-server:8081/api/v1/" + SOCKET_GRPC_URL = "socket-server:3007" + OSS_HOST = "/assets" + OSS_TYPE = "QNY1" + OSS_CACHE_TYPE = "" + ZIPKIN_ENABLED = "false" + ROBOT_OFFICIAL_SERVICE_SLUG = "vika" + }, lookup(var.envs, "fusion_server", {})) +} + +resource "kubernetes_config_map" "fusion_server_env" { + depends_on = [ + kubernetes_namespace.this + ] + metadata { + name = "fusion-server-env" + namespace = var.namespace + + annotations = { + } + } + + data = local.fusion_server_env +} diff --git a/modules/app/imageproxy_server.tf b/modules/app/imageproxy_server.tf new file mode 100644 index 0000000..e310b86 --- /dev/null +++ b/modules/app/imageproxy_server.tf @@ -0,0 +1,170 @@ +locals { + default_imageproxy_server_resources = { + replicas = "1" + requests_cpu = "500m" + requests_memory = "1024Mi" + limits_cpu = "1500m" + limits_memory = "8192Mi" + rolling_update_max_unavailable = "0%" + rolling_update_max_surge = "25%" + } +} + +locals { + imageproxy_server_resources = merge(local.default_imageproxy_server_resources, lookup(var.resources, "imageproxy_server", {})) +} + +resource "kubernetes_deployment" "imageproxy_server" { + wait_for_rollout = var.is_wait + count = var.has_imageproxy_server ? 1 : 0 + depends_on = [ + kubernetes_namespace.this + ] + + metadata { + name = "imageproxy-server" + namespace = var.namespace + + labels = { + app = "imageproxy-server" + } + + # annotations = { + # # "deployment.kubernetes.io/revision" = "23" + # # "configmap.imageproxy-server-env/reload" = var.has_auto_reloaded_config_map ? sha1(jsonencode(kubernetes_config_map.imageproxy_server_env.data)) : "not_enabled" + # } + } + + spec { + replicas = local.imageproxy_server_resources["replicas"] + + selector { + match_labels = { + app = "imageproxy-server" + } + } + + template { + metadata { + labels = { + app = "imageproxy-server" + } + + annotations = { + # redeploy-timestamp = "1655781881366" + "configmap.imageproxy-server-env/reload" = var.has_auto_reloaded_config_map ? 
sha1(jsonencode(kubernetes_config_map.imageproxy_server_env[count.index].data)) : "not_enabled" + } + } + + spec { + + node_selector = var.node_selector + + container { + name = "imageproxy-server" + image = "${var.registry}/${lookup(local.image_namespaces, "imageproxy_server")}/imageproxy-server:${lookup(local.image_tags, "imageproxy_server")}" + + env_from { + config_map_ref { + name = "imageproxy-server-env" + } + } + + resources { + requests = { + cpu = local.imageproxy_server_resources["requests_cpu"] + memory = local.imageproxy_server_resources["requests_memory"] + } + limits = { + cpu = local.imageproxy_server_resources["limits_cpu"] + memory = local.imageproxy_server_resources["limits_memory"] + } + } + + liveness_probe { + http_get { + path = "/metrics" + port = "8080" + scheme = "HTTP" + } + + initial_delay_seconds = 10 + timeout_seconds = 3 + period_seconds = 15 + success_threshold = 1 + failure_threshold = 3 + } + + readiness_probe { + http_get { + path = "/metrics" + port = "8080" + scheme = "HTTP" + } + + initial_delay_seconds = 5 + timeout_seconds = 3 + period_seconds = 10 + success_threshold = 1 + failure_threshold = 3 + } + + termination_message_path = "/dev/termination-log" + termination_message_policy = "File" + image_pull_policy = var.image_pull_policy + } + + image_pull_secrets { + name = "regcred" + } + restart_policy = "Always" + termination_grace_period_seconds = 30 + dns_policy = "ClusterFirst" + } + } + + strategy { + type = "RollingUpdate" + + rolling_update { + max_unavailable = "25%" + max_surge = "25%" + } + } + + revision_history_limit = 10 + progress_deadline_seconds = 600 + } + + //Whether to ignore the change of image tag. + lifecycle { + ignore_changes = [ + spec[0].template[0].spec[0].affinity, + ] + } +} + +resource "kubernetes_service" "imageproxy_server" { + count = var.has_imageproxy_server ? 1 : 0 + metadata { + name = "imageproxy-server" + namespace = var.namespace + } + + spec { + port { + name = "imageproxy-server-8080-8080" + protocol = "TCP" + port = 80 + target_port = "8080" + } + + selector = { + app = "imageproxy-server" + } + + type = "ClusterIP" + session_affinity = "None" + ip_families = ["IPv4"] + } +} diff --git a/modules/app/imageproxy_server_env.tf b/modules/app/imageproxy_server_env.tf new file mode 100644 index 0000000..f806db5 --- /dev/null +++ b/modules/app/imageproxy_server_env.tf @@ -0,0 +1,25 @@ +locals { + imageproxy_server_env = merge(local.env_config, { + BASEURL = "http://minio:9000", + TZ = "Asia/Shanghai", + IMAGEPROXY_CACHE = "/tmp/imageproxy" + }, lookup(var.envs, "imageproxy_server", {})) +} + +resource "kubernetes_config_map" "imageproxy_server_env" { + + count = var.has_imageproxy_server ? 
1 : 0 + + depends_on = [ + kubernetes_namespace.this + ] + metadata { + name = "imageproxy-server-env" + namespace = var.namespace + + annotations = { + } + } + + data = local.imageproxy_server_env +} diff --git a/modules/app/job_admin_server.app.tf b/modules/app/job_admin_server.app.tf new file mode 100644 index 0000000..dde6ac8 --- /dev/null +++ b/modules/app/job_admin_server.app.tf @@ -0,0 +1,124 @@ +# env +locals { + default_job_admin_server_resources = { + replicas = "1" + requests_cpu = "100m" + requests_memory = "512Mi" + limits_cpu = "500m" + limits_memory = "1024Mi" + rolling_update_max_unavailable = "25%" + rolling_update_max_surge = "25%" + } +} +locals { + job_admin_server_env = merge(local.env_config, { + + # Job Admin Server + Executor + # (xxl)JOB(The variable values of the dispatch center and the executor must be consistent) + JOB_ACCESS_TOKEN = "onJvanLmSE3CqotjNp8hb7WRolpM1pdL" + JOB_ADMIN_ADDRESS = "http://job-admin-server:8080/job-admin" + + # SMTP Email push configuration + MAIL_HOST = "smtp.feishu.cn" + MAIL_USERNAME = "email-server@vikadata.com" + MAIL_PASSWORD = "Qwer123456" + + }, lookup(var.envs, "job_admin_server", {})) + + job_admin_server_resources = merge(local.default_job_admin_server_resources, lookup(var.resources, "job_admin_server", {})) +} + +resource "kubernetes_deployment" "job_admin_server" { + wait_for_rollout = var.is_wait + count = var.has_job_admin_server ? 1 : 0 + depends_on = [ + kubernetes_namespace.this + ] + metadata { + name = "job-admin-server" + namespace = var.namespace + + labels = { + app = "job-admin-server" + } + + annotations = { + # "deployment.kubernetes.io/revision" = "23" + } + } + + spec { + replicas = local.job_admin_server_resources["replicas"] + + selector { + match_labels = { + app = "job-admin-server" + } + } + + template { + metadata { + labels = { + app = "job-admin-server" + } + + annotations = { + # redeploy-timestamp = "1655781881366" + } + } + + spec { + container { + name = "job-admin-server" + image = "${var.registry}/${lookup(local.image_namespaces, "job_admin_server")}/xxl-job-admin:2.3.0" + + env { + name = "PARAMS" + value = "--server.servlet.context-path=/job-admin --spring.datasource.url=jdbc:mysql://${lookup(local.job_admin_server_env, "MYSQL_HOST", "localhost")}:${lookup(local.job_admin_server_env, "MYSQL_PORT", "3306")}/${lookup(local.job_admin_server_env, "MYSQL_DATABASE", "")}?Unicode=true&characterEncoding=UTF-8 --spring.datasource.username=${lookup(local.job_admin_server_env, "MYSQL_USERNAME", "root")} --spring.datasource.password=${sensitive(lookup(local.job_admin_server_env, "MYSQL_PASSWORD", ""))} --xxl.job.accessToken=${lookup(local.job_admin_server_env, "JOB_ACCESS_TOKEN", "")} --spring.mail.host=${lookup(local.job_admin_server_env, "MAIL_HOST", "root")} --spring.mail.port=465 --spring.mail.username=${lookup(local.job_admin_server_env, "MAIL_USERNAME", "root")} --spring.mail.from=${lookup(local.job_admin_server_env, "MAIL_USERNAME", "root")} --spring.mail.password=${lookup(local.job_admin_server_env, "MAIL_PASSWORD", "")} --spring.mail.properties.mail.smtp.socketFactory.port=465" + } + + resources { + requests = { + cpu = local.job_admin_server_resources["requests_cpu"] + memory = local.job_admin_server_resources["requests_memory"] //@add_tf_local + } + limits = { + cpu = local.job_admin_server_resources["limits_cpu"] //@add_tf_local + memory = local.job_admin_server_resources["limits_memory"] //@add_tf_local + } + } + + termination_message_path = "/dev/termination-log" + termination_message_policy 
= "File" + image_pull_policy = var.image_pull_policy + } + + image_pull_secrets { + name = "regcred" + } + restart_policy = "Always" + termination_grace_period_seconds = 30 + dns_policy = "ClusterFirst" + } + } + + strategy { + type = "RollingUpdate" + + rolling_update { + max_unavailable = local.job_admin_server_resources["rolling_update_max_unavailable"] + max_surge = local.job_admin_server_resources["rolling_update_max_surge"] + } + } + + revision_history_limit = 10 + progress_deadline_seconds = 600 + } + + //Whether to ignore the change of image tag. + lifecycle { + ignore_changes = [ + spec[0].template[0].spec[0].affinity, + ] + } +} diff --git a/modules/app/job_admin_server.svc.tf b/modules/app/job_admin_server.svc.tf new file mode 100644 index 0000000..5750d9f --- /dev/null +++ b/modules/app/job_admin_server.svc.tf @@ -0,0 +1,30 @@ + +# server +resource "kubernetes_service" "job_admin_server" { + count = var.has_job_admin_server ? 1 : 0 + depends_on = [ + kubernetes_namespace.this + ] + metadata { + name = "job-admin-server" + namespace = var.namespace + } + + spec { + port { + name = "job-admin-server-8080-8080" + protocol = "TCP" + port = 8080 + target_port = "8080" + } + + selector = { + app = "job-admin-server" + } + + type = "ClusterIP" + session_affinity = "None" + ip_families = ["IPv4"] + } +} + diff --git a/modules/app/nest_rest_server.tf b/modules/app/nest_rest_server.tf new file mode 100644 index 0000000..8b81d73 --- /dev/null +++ b/modules/app/nest_rest_server.tf @@ -0,0 +1,221 @@ +locals { + default_nest_rest_server_resources = { + replicas = "1" + max_replicas = "50" + requests_cpu = "100m" + requests_memory = "512Mi" + limits_cpu = "1500m" + limits_memory = "8192Mi" + rolling_update_max_unavailable = "25%" + rolling_update_max_surge = "25%" + cpu_utilization_percentage = "500" + } +} + +locals { + nest_rest_server_resources = merge(local.default_nest_rest_server_resources, lookup(var.resources, "nest_rest_server", {})) +} + +resource "kubernetes_deployment" "nest_rest_server" { + wait_for_rollout = var.is_wait + count = var.has_nest_rest_server ? 
1 : 0 + + depends_on = [ + kubernetes_deployment.backend_server, + kubernetes_namespace.this + ] + + metadata { + name = "nest-rest-server" + namespace = var.namespace + + labels = { + app = "nest-rest-server" + } + + annotations = { + # "deployment.kubernetes.io/revision" = "1" + } + } + + spec { + replicas = local.nest_rest_server_resources["replicas"] + + selector { + match_labels = { + app = "nest-rest-server" + } + } + + template { + metadata { + labels = { + app = "nest-rest-server" + } + + annotations = { + # redeploy-timestamp = "1655781881366" + } + } + + spec { + + node_selector = var.node_selector + + container { + name = "nest-rest-server" + image = "${var.registry}/${lookup(local.image_namespaces, "room_server")}/room-server:${lookup(local.image_tags, "room_server")}" + + env_from { + config_map_ref { + name = "nest-rest-server-env" + } + } + + resources { + requests = { + cpu = local.nest_rest_server_resources["requests_cpu"] + memory = local.nest_rest_server_resources["requests_memory"] //@add_tf_local + } + + limits = { + cpu = local.fusion_server_resources["limits_cpu"] //@add_tf_local + memory = local.fusion_server_resources["limits_memory"] //@add_tf_local + } + } + + liveness_probe { + http_get { + path = "/actuator/health" + port = "3333" + scheme = "HTTP" + } + + initial_delay_seconds = 10 + timeout_seconds = 3 + period_seconds = 45 + success_threshold = 1 + failure_threshold = 3 + } + + readiness_probe { + http_get { + path = "/actuator/health" + port = "3333" + scheme = "HTTP" + } + + initial_delay_seconds = 10 + timeout_seconds = 3 + period_seconds = 15 + success_threshold = 1 + failure_threshold = 3 + } + + startup_probe { + http_get { + path = "/actuator/health" + port = "3333" + scheme = "HTTP" + } + + initial_delay_seconds = 10 + timeout_seconds = 1 + period_seconds = 10 + success_threshold = 1 + failure_threshold = 3 + } + + volume_mount { + name = "nest-rest-server-1658224800000" + mount_path = "/home/vikadata/packages/room-server/logs" + } + + volume_mount { + name = "nest-rest-server-1658224800000" + mount_path = "/app/packages/room-server/logs" + } + + termination_message_path = "/dev/termination-log" + termination_message_policy = "File" + image_pull_policy = var.image_pull_policy + } + + volume { + name = "nest-rest-server-1658224800000" + empty_dir {} + } + + image_pull_secrets { + name = "regcred" + } + restart_policy = "Always" + termination_grace_period_seconds = 30 + dns_policy = "ClusterFirst" + } + } + + strategy { + type = "RollingUpdate" + + rolling_update { + max_unavailable = local.nest_rest_server_resources["rolling_update_max_unavailable"] + max_surge = local.nest_rest_server_resources["rolling_update_max_surge"] + } + } + + revision_history_limit = 10 + progress_deadline_seconds = 600 + } + + //Whether to ignore the change of image tag. + lifecycle { + ignore_changes = [ + spec[0].template[0].spec[0].affinity, + ] + } +} + +resource "kubernetes_service" "nest_rest_server" { + metadata { + name = "nest-rest-server" + namespace = var.namespace + } + + spec { + port { + name = "nest-rest-server-3333-3333" + protocol = "TCP" + port = 3333 + target_port = "3333" + } + + selector = { + app = var.has_nest_rest_server ? "nest-rest-server" : "room-server" + } + + type = "ClusterIP" + session_affinity = "None" + ip_families = ["IPv4"] + } +} + +resource "kubernetes_horizontal_pod_autoscaler" "nest_rest_server_autoscaler" { + count = var.has_room_server ? 
1 : 0 + metadata { + name = "nest-rest-server-autoscaler-v2beta2" + namespace = var.namespace + } + + spec { + min_replicas = local.nest_rest_server_resources["replicas"] + max_replicas = local.nest_rest_server_resources["max_replicas"] + + scale_target_ref { + api_version = "apps/v1" + kind = "Deployment" + name = "nest-rest-server" + } + target_cpu_utilization_percentage = local.nest_rest_server_resources["cpu_utilization_percentage"] + } +} diff --git a/modules/app/nest_rest_server_env.tf b/modules/app/nest_rest_server_env.tf new file mode 100644 index 0000000..d0f590b --- /dev/null +++ b/modules/app/nest_rest_server_env.tf @@ -0,0 +1,30 @@ +locals { + nest_rest_server_env = merge(local.env_config, { + WEB_SOCKET_CHANNEL_ENV = var.namespace + APPLICATION_NAME = "NEST_REST_SERVER" + INSTANCE_COUNT = "1" + LOG_LEVEL = "info" + BACKEND_BASE_URL = "http://backend-server:8081/api/v1/" + SOCKET_GRPC_URL = "socket-server:3007" + OSS_HOST = "/assets" + OSS_TYPE = "QNY1" + ZIPKIN_ENABLED = "false" + ROBOT_OFFICIAL_SERVICE_SLUG = "vika" + }, lookup(var.envs, "nest_rest_server", {})) +} + +resource "kubernetes_config_map" "nest_rest_server_env" { + depends_on = [ + kubernetes_namespace.this + ] + metadata { + name = "nest-rest-server-env" + namespace = var.namespace + + annotations = { + } + } + + + data = local.nest_rest_server_env +} diff --git a/modules/app/openresty.app.tf b/modules/app/openresty.app.tf new file mode 100644 index 0000000..03ad8c7 --- /dev/null +++ b/modules/app/openresty.app.tf @@ -0,0 +1,454 @@ +# env +locals { + default_openresty_resources = { + replicas = "1" + requests_cpu = "250m" + requests_memory = "512Mi" + limits_cpu = "2000m" + limits_memory = "4096Mi" + rolling_update_max_unavailable = "25%" + rolling_update_max_surge = "25%" + lifecycle_post_start_command = [ + "/bin/sh", "-c", + "ls" + ] + } +} + +locals { + openresty_resources = merge(local.default_openresty_resources, lookup(var.resources, "openresty", {})) +} + +resource "kubernetes_deployment" "openresty" { + wait_for_rollout = var.is_wait + lifecycle { + ignore_changes = [ + spec[0].template[0].spec[0].affinity, + metadata[0].annotations, + spec[0].template[0].metadata[0].annotations + ] + } + count = var.has_openresty ? 1 : 0 + depends_on = [ + kubernetes_namespace.this + ] + metadata { + name = "openresty" + namespace = var.namespace + + labels = { + app = "openresty" + } + + } + + spec { + replicas = local.openresty_resources["replicas"] + + selector { + match_labels = { + app = "openresty" + } + } + + template { + metadata { + labels = { + app = "openresty" + } + + annotations = { + # redeploy-timestamp = "1655996168285" + "configmap.openresty-config/reload" = var.has_auto_reloaded_config_map ? 
sha1(jsonencode(kubernetes_config_map.openresty_config.data)) : "not_enabled" + } + } + + spec { + + node_selector = var.node_selector + + volume { + name = "volume-1655994097224" + + config_map { + name = "openresty-config" + default_mode = "0644" + } + } + + volume { + name = "volume-1655994376906" + + config_map { + name = "openresty-config" + default_mode = "0644" + } + } + + volume { + name = "volume-1655995785123" + + config_map { + name = "openresty-config" + default_mode = "0644" + } + } + + volume { + name = "volume-1655995785124" + + config_map { + name = "openresty-config" + default_mode = "0644" + } + } + + volume { + name = "volume-1655995807238" + + config_map { + name = "openresty-config" + default_mode = "0644" + } + } + + volume { + name = "volume-1655995833183" + + config_map { + name = "openresty-config" + default_mode = "0644" + } + } + + + volume { + name = "volume-1655995925041" + + config_map { + name = "openresty-config" + default_mode = "0644" + } + } + + volume { + name = "volume-1655995926172" + + config_map { + name = "openresty-config" + default_mode = "0644" + } + } + + volume { + name = "volume-1655995964531" + + config_map { + name = "openresty-config" + default_mode = "0644" + } + } + + volume { + name = "volume-1655995964532" + + config_map { + name = "openresty-config" + default_mode = "0644" + } + } + + volume { + name = "volume-1655995965437" + + config_map { + name = "openresty-config" + default_mode = "0644" + } + } + + volume { + name = "volume-1655995965438" + + config_map { + name = "openresty-config" + default_mode = "0644" + } + } + + volume { + name = "volume-1655995965439" + + config_map { + name = "openresty-config" + default_mode = "0644" + } + } + + volume { + name = "volume-1655995965440" + + config_map { + name = "openresty-config" + default_mode = "0644" + } + } + + volume { + name = "volume-ssl-certs" + + secret { + secret_name = "openresty-ssl-certs" + default_mode = "0644" + } + } + + volume { + name = "volume-extend-ssl-certs" + + secret { + secret_name = var.has_extend_tls ? "openresty-extend-ssl-certs" : "openresty-ssl-certs" + default_mode = "0644" + } + } + + container { + name = "openresty" + # 重打包镜像,增加了resty-http 模块 + image = "${var.registry}/${lookup(local.image_namespaces, "openresty")}/openresty:1.21.4.1-http-fat" + + resources { + requests = { + cpu = local.openresty_resources["requests_cpu"] //@add_tf_local + memory = local.openresty_resources["requests_memory"] //@add_tf_local + } + limits = { + cpu = local.openresty_resources["limits_cpu"] //@add_tf_local + memory = local.openresty_resources["limits_memory"] //@add_tf_local + } + } + #增加读取大表灰度功能 + lifecycle { + post_start { + exec { + command = local.openresty_resources["lifecycle_post_start_command"] + } + } + } + + volume_mount { + name = "volume-1655994097224" + mount_path = "/usr/local/openresty/nginx/conf/nginx.conf" + sub_path = "nginx.conf" + } + + volume_mount { + name = "volume-1655994097224" + mount_path = "/etc/nginx/conf.d/vhost/ssl_host.conf" + sub_path = var.enable_ssl ? 
"ssl-host.conf" : "ssl-default.conf" + } + + volume_mount { + name = "volume-1655994097224" + mount_path = "/etc/nginx/conf.d/vhost/openresty_extra_config.conf" + sub_path = "openresty_extra_config.conf" + } + + volume_mount { + mount_path = "/etc/nginx/conf.d/certs/tls.crt" + name = "volume-ssl-certs" + sub_path = "tls.crt" + } + + + volume_mount { + mount_path = "/etc/nginx/conf.d/certs/tls.key" + name = "volume-ssl-certs" + sub_path = "tls.key" + } + + volume_mount { + mount_path = "/etc/nginx/conf.d/certs/extend-tls.crt" + name = "volume-extend-ssl-certs" + sub_path = "tls.crt" + } + + + volume_mount { + mount_path = "/etc/nginx/conf.d/certs/extend-tls.key" + name = "volume-extend-ssl-certs" + sub_path = "tls.key" + } + + volume_mount { + name = "volume-1655994376906" + mount_path = "/etc/nginx/conf.d/upstream/ups-socket-server.conf" + sub_path = "ups-socket-server.conf" + } + + volume_mount { + name = "volume-1655995785123" + mount_path = "/etc/nginx/conf.d/upstream/ups-room-server.conf" + sub_path = "ups-room-server.conf" + } + + volume_mount { + name = "volume-1655995785124" + mount_path = "/etc/nginx/conf.d/upstream/ups-fusion-server.conf" + sub_path = "ups-fusion-server.conf" + } + + volume_mount { + name = "volume-1655995807238" + mount_path = "/etc/nginx/conf.d/upstream/ups-backend-server.conf" + sub_path = "ups-backend-server.conf" + } + + volume_mount { + name = "volume-1655995833183" + mount_path = "/etc/nginx/conf.d/upstream/ups-web-server.conf" + sub_path = "ups-web-server.conf" + } + + volume_mount { + name = "volume-1655995833183" + mount_path = "/etc/nginx/conf.d/upstream/ups-ai-server.conf" + sub_path = "ups-ai-server.conf" + } + volume_mount { + name = "volume-1655995965440" + mount_path = "/etc/nginx/conf.d/server/ai-server.conf" + sub_path = "ai-server.conf" + } + + volume_mount { + name = "volume-1655995833183" + mount_path = "/etc/nginx/conf.d/upstream/ups-databus-server.conf" + sub_path = "ups-databus-server.conf" + } + + volume_mount { + name = "volume-1655995965440" + mount_path = "/etc/nginx/conf.d/server/databus-server.conf" + sub_path = "databus-server.conf" + } + volume_mount { + name = "volume-1655995925041" + mount_path = "/etc/nginx/conf.d/server/web-server.conf" + sub_path = "web-server.conf" + } + + volume_mount { + name = "volume-1655995926172" + mount_path = "/etc/nginx/conf.d/server/backend-server.conf" + sub_path = "backend-server.conf" + } + + volume_mount { + name = "volume-1655995964531" + mount_path = "/etc/nginx/conf.d/server/room-server.conf" + sub_path = "room-server.conf" + } + + volume_mount { + name = "volume-1655995964532" + mount_path = "/etc/nginx/conf.d/server/fusion-server.conf" + sub_path = "fusion-server.conf" + } + + volume_mount { + name = "volume-1655995965437" + mount_path = "/etc/nginx/conf.d/server/socket-server.conf" + sub_path = "socket-server.conf" + } + + volume_mount { + name = "volume-1655995965438" + mount_path = "/etc/nginx/conf.d/server/job-admin-server.conf" + sub_path = "job-admin-server.conf" + } + + volume_mount { + name = "volume-1655995965439" + mount_path = "/etc/nginx/conf.d/upstream/ups-job-admin-server.conf" + sub_path = "ups-job-admin-server.conf" + } + + volume_mount { + name = "volume-1655995965440" + mount_path = "/etc/nginx/conf.d/server/lbs-amap.conf" + sub_path = "lbs-amap.conf" + } + + volume_mount { + name = "volume-1655995965440" + mount_path = "/etc/nginx/conf.d/server/static-proxy.conf" + sub_path = "static-proxy.conf" + } + + volume_mount { + name = "volume-1655995965440" + mount_path = 
"/etc/nginx/conf.d/server/imageproxy-server.conf" + sub_path = var.has_imageproxy_server ? "imageproxy-server.conf" : "blank.config" + } + + volume_mount { + name = "volume-1655995965440" + mount_path = "/usr/local/openresty/nginx/html/robots.txt" + sub_path = var.disallow_robots ? "disable_robots.txt" : "enable_robots.txt" + } + + liveness_probe { + http_get { + path = "/healthz" + port = "80" + scheme = "HTTP" + } + + initial_delay_seconds = 5 + timeout_seconds = 1 + period_seconds = 10 + success_threshold = 1 + failure_threshold = 3 + } + + readiness_probe { + http_get { + path = "/healthz" + port = "80" + scheme = "HTTP" + } + + initial_delay_seconds = 5 + timeout_seconds = 1 + period_seconds = 10 + success_threshold = 1 + failure_threshold = 3 + } + + termination_message_path = "/dev/termination-log" + termination_message_policy = "File" + image_pull_policy = "Always" + } + image_pull_secrets { + name = "regcred" + } + restart_policy = "Always" + termination_grace_period_seconds = 30 + dns_policy = "ClusterFirst" + } + } + + strategy { + type = "RollingUpdate" + + rolling_update { + max_unavailable = "25%" + max_surge = "25%" + } + } + + revision_history_limit = 10 + progress_deadline_seconds = 600 + } +} diff --git a/modules/app/openresty.configmap.tf b/modules/app/openresty.configmap.tf new file mode 100644 index 0000000..73c45ea --- /dev/null +++ b/modules/app/openresty.configmap.tf @@ -0,0 +1,696 @@ +resource "kubernetes_config_map" "openresty_config" { + depends_on = [ + kubernetes_namespace.this + ] + metadata { + name = "openresty-config" + namespace = var.namespace + } + + data = { + "backend-server.conf" = <<-EOT + location /api { + proxy_pass http://backend; + proxy_set_header X-Real-Host $http_host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Real-PORT $remote_port; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Original-URI $request_uri; + proxy_set_header Access-Control-Allow-Headers 'Cookie,Set-Cookie,x-requested-with,content-type'; + proxy_set_header Access-Control-Allow-Origin $http_origin ; + proxy_set_header 'Access-Control-Allow-Credentials' 'true'; + add_header 'Access-Control-Allow-Methods' 'GET,POST,PUT,OPTIONS'; + + proxy_connect_timeout 180s; + proxy_read_timeout 180s; + proxy_send_timeout 180s; + } + + location /api/v1/node/readShareInfo { + proxy_pass http://backend; + proxy_set_header X-Real-Host $http_host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Real-PORT $remote_port; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Original-URI $request_uri; + #Open API cross-domain configuration + add_header 'Access-Control-Allow-Origin' '*' always; + add_header 'Access-Control-Allow-Credentials' 'true' always; + add_header 'Access-Control-Allow-Methods' 'GET, POST, OPTIONS, DELETE, PATCH' always; + add_header 'Access-Control-Allow-Headers' 'Origin, X-Requested-With,Content-Type, Accept' always; + if ($request_method = 'OPTIONS' ) { + return 204; + } + proxy_connect_timeout 180s; + proxy_read_timeout 180s; + proxy_send_timeout 180s; + } + + EOT + + "nginx.conf" = <<-EOT + worker_processes ${var.worker_processes}; + # Enables the use of JIT for regular expressions to speed-up their processing. 
+ pcre_jit on; + + #error_log logs/error.log; + #error_log logs/error.log info; + #pid logs/nginx.pid; + + events { + worker_connections 204800; + multi_accept on; + } + http { + include mime.types; + default_type application/octet-stream; + log_format json_combined escape=json '{"@timestamp":"$time_iso8601",' + '"@source":"$server_addr",' + '"@nginx_fields":{' + '"remote_addr":"$remote_addr",' + '"body_bytes_sent":"$body_bytes_sent",' + '"request_time":"$request_time",' + '"status":"$status",' + '"host":"$host",' + '"uri":"$uri",' + '"server":"$server_name",' + '"request_uri":"$request_uri",' + '"request_method":"$request_method",' + '"http_referrer":"$http_referer",' + '"body_bytes_sent":"$body_bytes_sent",' + '"http_x_forwarded_for":"$http_x_forwarded_for",' + '"http_user_agent":"$http_user_agent",' + '"upstream_response_time":"$upstream_response_time",' + '"upstream_status":"$upstream_status",' + '"upstream_addr":"$upstream_addr"}}'; + + access_log /dev/stdout json_combined; + server_tokens off; + + # Configure Gray Shared Dictionary + lua_shared_dict gray 64m; + + #Max size for file upload + client_max_body_size 1024m; + + ##cache## + proxy_buffer_size 16k; + proxy_buffers 4 64k; + proxy_busy_buffers_size 128k; + proxy_cache_path /tmp/cache levels=1:2 keys_zone=cache_one:200m inactive=1d max_size=3g; + + gzip on; + gzip_proxied any; + gzip_comp_level 5; + gzip_buffers 16 8k; + gzip_min_length 1024; + gzip_http_version 1.1; + gzip_types text/plain application/x-javascript text/css text/javascript application/json application/javascript; + + include /etc/nginx/conf.d/upstream/*.conf; + map $http_upgrade $connection_upgrade { + default upgrade; + '' close; + } + + server { + listen 80 default; + server_name 127.0.0.1; + charset utf-8; + error_page 404 502 503 /404; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Real-PORT $remote_port; + proxy_set_header X-Original-URI $request_uri; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + + include /etc/nginx/conf.d/server/*.conf; + + location =/healthz { + return 200; + access_log off; + } + + location =/robots.txt { + root /usr/local/openresty/nginx/html; + } + + ${var.openresty_server_block} + } + + include /etc/nginx/conf.d/vhost/*.conf; + } + EOT + + #Default ssl configuration + "ssl-default.conf" = "" + + "ssl-host.conf" = <<-EOT + server { + listen 443 ssl http2; + server_name ${var.server_name}; + + ssl_certificate /etc/nginx/conf.d/certs/tls.crt; + ssl_certificate_key /etc/nginx/conf.d/certs/tls.key; + ssl_session_timeout 1d; + ssl_session_cache shared:MozSSL:10m; # about 40000 sessions + ssl_session_tickets off; + + # intermediate configuration + ssl_protocols TLSv1.2 TLSv1.3; + ssl_ciphers ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:DHE-RSA-CHACHA20-POLY1305:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA256:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA384; + ssl_prefer_server_ciphers off; + + charset utf-8; + + error_page 404 502 503 /404; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Real-PORT $remote_port; + proxy_set_header X-Original-URI $request_uri; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + + include /etc/nginx/conf.d/server/*.conf; + + ${var.openresty_server_block} + + location =/healthz { + return 200; + access_log off; + } + + location =/robots.txt { + root 
/usr/local/openresty/nginx/html; + } + } + EOT + + "room-server.conf" = <<-EOT + location ~* ^/(actuator|nest) { + proxy_pass http://nest-rest; + + proxy_set_header X-Real-Host $http_host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Real-PORT $remote_port; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Original-URI $request_uri; + + proxy_connect_timeout 180s; + proxy_read_timeout 180s; + proxy_send_timeout 180s; + } + + %{if var.has_document_server} + location /document { + proxy_pass http://document; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection $connection_upgrade; + proxy_set_header X-Nginx-Proxy true; + proxy_set_header Host $host:$server_port; + proxy_http_version 1.1; + proxy_connect_timeout 300s; + proxy_read_timeout 300s; + proxy_send_timeout 300s; + } + %{endif} + + %{if var.has_ai_server} + location ^~ /nest/v1/ai { + proxy_pass http://nest-rest; + + chunked_transfer_encoding off; + proxy_buffering off; + proxy_cache off; + proxy_http_version 1.1; + proxy_set_header Connection ''; + + proxy_set_header X-Real-Host $http_host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Real-PORT $remote_port; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Original-URI $request_uri; + + proxy_connect_timeout 180s; + proxy_read_timeout 180s; + proxy_send_timeout 180s; + } + %{endif} + EOT + + "fusion-server.conf" = <<-EOT + location /fusion { + proxy_pass http://fusion; + proxy_next_upstream error http_502 non_idempotent; + proxy_next_upstream_tries 3; + error_page 404 503 /404; + + proxy_set_header X-Real-Host $http_host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Real-PORT $remote_port; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Original-URI $request_uri; + #Open API cross-domain configuration + add_header 'Access-Control-Allow-Origin' '*' always; + add_header 'Access-Control-Allow-Credentials' 'true' always; + add_header 'Access-Control-Allow-Methods' 'GET, POST, OPTIONS, DELETE, PATCH' always; + add_header 'Access-Control-Allow-Headers' 'Authorization,DNT,X-CustomHeader,Keep-Alive,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Content-Range,Range,x-vika-user-agent' always; + if ($request_method = 'OPTIONS' ) { + return 204; + } + proxy_connect_timeout 180s; + proxy_read_timeout 180s; + proxy_send_timeout 180s; + } + EOT + + "socket-server.conf" = <<-EOT + location /room { + proxy_pass http://socketRoom; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection $connection_upgrade; + proxy_set_header X-Nginx-Proxy true; + proxy_set_header Host $host:$server_port; + proxy_http_version 1.1; + proxy_connect_timeout 300s; + proxy_read_timeout 300s; + proxy_send_timeout 300s; + } + + location /notification { + proxy_pass http://socket; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection $connection_upgrade; + proxy_set_header X-Nginx-Proxy true; + proxy_set_header Host $host:$server_port; + proxy_http_version 1.1; + proxy_connect_timeout 300s; + proxy_read_timeout 300s; + proxy_send_timeout 300s; + } + EOT + + "ai-server.conf" = <<-EOT + %{if var.has_ai_server} + location /ai { + proxy_pass http://ai-server; + proxy_set_header X-Real-Host $http_host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Real-PORT $remote_port; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Original-URI 
$request_uri; + proxy_set_header Access-Control-Allow-Headers 'Cookie,Set-Cookie,x-requested-with,content-type'; + proxy_set_header Access-Control-Allow-Origin $http_origin ; + proxy_set_header 'Access-Control-Allow-Credentials' 'true'; + proxy_buffering off; + add_header 'Access-Control-Allow-Methods' 'GET,POST,PUT,OPTIONS'; + + proxy_connect_timeout 180s; + proxy_read_timeout 180s; + proxy_send_timeout 180s; + } + + %{endif} + EOT + + "ups-ai-server.conf" = <<-EOT + %{if var.has_ai_server} + upstream ai-server { + server ai-server:8626; + } + %{endif} + + EOT + + "databus-server.conf" = <<-EOT + %{if var.has_databus_server && var.publish_databus_server} + location /databus { + proxy_pass http://databus-server; + + proxy_set_header X-Real-Host $http_host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Real-PORT $remote_port; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Original-URI $request_uri; + + proxy_connect_timeout 180s; + proxy_read_timeout 180s; + proxy_send_timeout 180s; + } + location /fusion/v3 { + proxy_pass http://databus-server; + + proxy_set_header X-Real-Host $http_host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Real-PORT $remote_port; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Original-URI $request_uri; + + proxy_connect_timeout 180s; + proxy_read_timeout 180s; + proxy_send_timeout 180s; + } + + %{endif} + EOT + + "ups-databus-server.conf" = <<-EOT + %{if var.has_databus_server} + upstream databus-server { + server databus-server:8625; + } + %{endif} + + EOT + + "job-admin-server.conf" = <<-EOT + #URI aligns the context-path of the service environment variable (guaranteed that after the service is redirected, it can still be routed to the service) + %{if var.has_job_admin_server} + location /job-admin { + proxy_pass http://job-admin-server; + } + %{endif} + EOT + + "ups-job-admin-server.conf" = <<-EOT + %{if var.has_job_admin_server} + upstream job-admin-server { + server ${var.job_admin_server_host}:8080; + } + %{endif} + EOT + + "ups-backend-server.conf" = <<-EOT + upstream backend { + server backend-server:8081; + } + EOT + + "ups-room-server.conf" = <<-EOT + upstream room { + server room-server:3333; + } + upstream nest-rest { + server nest-rest-server:3333; + } + %{if var.has_document_server} + upstream document { + server document-server:3006; + } + %{else} + upstream document { + server room-server:3006; + } + %{endif} + EOT + + "ups-fusion-server.conf" = <<-EOT + upstream fusion { + server fusion-server:3333 max_fails=0; + server fusion-server:3333 max_fails=0; + server fusion-server:3333 max_fails=0; + } + + EOT + + "ups-socket-server.conf" = <<-EOT + upstream socket { + server socket-server:3002; + } + upstream socketRoom { + server socket-server:3005; + } + EOT + + "ups-web-server.conf" = <<-EOT + upstream web-server { + server web-server:8080; + } + EOT + + "web-server.conf" = <<-EOT + #Default official website path + location / { + proxy_set_header X-Nginx-Proxy true; + proxy_set_header Host %{if var.default_server_host_override_proxy_host != ""}${var.default_server_host_override_proxy_host}%{else}$http_host%{endif}; + proxy_set_header X-Original-URI $request_uri; + proxy_http_version 1.1; + proxy_pass ${var.default_server_host}; + + ${var.openresty_index_block} + } + + #Help Center Search + location =/wp-admin/admin-ajax.php { + proxy_pass ${var.default_server_host}; + proxy_set_header Host %{if var.default_server_host_override_proxy_host != 
""}${var.default_server_host_override_proxy_host}%{else}$http_host%{endif}; + proxy_set_header X-Original-URI $request_uri; + } + + #Disable path + location ~* ^/(wp-admin|wp-login.php|readme.html|xmlrpc.php){ + deny all; + } + + location =/ { + proxy_set_header Host %{if var.default_server_host_override_proxy_host != ""}${var.default_server_host_override_proxy_host}%{else}$http_host%{endif}; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_cache off; + set $cms 0; + ${var.openresty_index_block} + + if ($uri ~* wp-login.php$){ + proxy_pass ${var.default_server_host}; + set $cms 1; + } + if ($args ~* home=1){ + proxy_pass ${var.default_server_host}; + set $cms 1; + } + + if ($http_referer ~* "wp-admin"){ + proxy_pass ${var.default_server_host}; + set $cms 1; + } + + if ($http_user_agent ~* "qihoobot|Baiduspider|Googlebot|Googlebot-Mobile|Googlebot-Image|Mediapartners-Google|Adsbot-Google|Feedfetjauntycher-Google|Yahoo! Slurp|Yahoo! Slurp China|YoudaoBot|Sosospider|Sogouspider|Sogou web spider|MSNBot|ia_archiver|Tomato Bot"){ + proxy_pass ${var.default_server_host}; + set $cms 1; + } + + if ($cms = 0){ + %{if var.default_server_host_override_proxy_host != ""~} + proxy_pass ${var.default_server_host}; + %{else} + proxy_pass http://web-server; + add_header Content-Type text/html; + add_header 'Cache-Control' 'no-store, no-cache, must-revalidate, proxy-revalidate, max-age=0'; + expires off; + %{endif~} + } + } + + #Allow embedded routes + location ~* ^/(login|share|404|widget-stage|embed) { + proxy_set_header Host $http_host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + + proxy_connect_timeout 300; + proxy_http_version 1.1; + proxy_set_header Connection ""; + chunked_transfer_encoding off; + proxy_pass http://web-server; + add_header 'Cache-Control' 'no-store, no-cache, must-revalidate, proxy-revalidate, max-age=0'; + } + + #Allow embedded routes and cache + location ~* ^/(custom|file) { + proxy_set_header Host $http_host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + + proxy_connect_timeout 300; + proxy_pass http://web-server; + add_header 'Cache-Control' 'public, max-age=2592000'; + proxy_redirect off; + proxy_cache cache_one; + proxy_cache_valid 200 302 1h; + proxy_cache_valid 301 1d; + proxy_cache_valid any 1m; + expires 7d; + } + + #Prevent external sites from embedding routes + location ~* ^/(space|user|invite|template|workbench|org|management|notify) { + proxy_set_header Host $http_host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + + proxy_connect_timeout 300; + proxy_http_version 1.1; + proxy_set_header Connection ""; + chunked_transfer_encoding off; + proxy_pass http://web-server; + add_header 'Cache-Control' 'no-store, no-cache, must-revalidate, proxy-revalidate, max-age=0'; + add_header 'X-Frame-Options' 'SAMEORIGIN'; + } + + location /_next { + proxy_pass http://web-server; + proxy_connect_timeout 300; + proxy_http_version 1.1; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection $connection_upgrade; + proxy_set_header Host $http_host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For 
$proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + } + + # Static resources (config_map_kong will overwrite this in saas environment) + location /web_build { + proxy_set_header Host $http_host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_pass http://web-server/web_build; + add_header 'Cache-Control' 'public, max-age=2592000'; + proxy_redirect off; + proxy_cache cache_one; + proxy_cache_valid 200 302 1h; + proxy_cache_valid 301 1d; + proxy_cache_valid any 1m; + expires 7d; + } + EOT + + "lbs-amap.conf" = <<-EOT + %{if var.lbs_amap_secret != ""~} + location /_AMapService/v4/map/styles { + set $args "$args&jscode=${var.lbs_amap_secret}"; + proxy_pass https://webapi.amap.com/v4/map/styles; + } + + location /_AMapService/v3/vectormap { + set $args "$args&jscode=${var.lbs_amap_secret}"; + proxy_pass https://fmap01.amap.com/v3/vectormap; + } + + location /_AMapService/ { + set $args "$args&jscode=${var.lbs_amap_secret}"; + proxy_pass https://restapi.amap.com/; + } + %{else} + # + %{endif~} + + EOT + + #Robots + "disable_robots.txt" = <<-EOT + User-agent: * + Disallow: / + EOT + "enable_robots.txt" = <<-EOT + User-agent: * + Disallow: /wp-admin/ + Allow: /wp-admin/admin-ajax.php + EOT + + "static-proxy.conf" = <<-EOT + %{if var.developers_redirect_url != ""~} + location ^~ /developers { + rewrite ^/developers/?(.*)$ ${var.developers_redirect_url}/$1 permanent; + } + %{endif~} + location /pricing { + proxy_pass ${var.pricing_host}; + proxy_http_version 1.1; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection $connection_upgrade; + proxy_set_header Host $http_host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + } + # pricing nextjs static resources + location /pricing/_next { + proxy_pass ${var.pricing_host}/_next; + proxy_set_header Host $http_host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + add_header 'Cache-Control' 'public, max-age=2592000'; + proxy_redirect off; + proxy_cache cache_one; + proxy_cache_valid 200 302 1h; + proxy_cache_valid 301 1d; + proxy_cache_valid any 1m; + expires 7d; + } + EOT + + "imageproxy-server.conf" = <<-EOT + location /${var.public_assets_bucket} { + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + + # enable cache + proxy_cache cache_one; + proxy_cache_valid 200 302 3h; + proxy_cache_valid any 1m; + + proxy_connect_timeout 300; + chunked_transfer_encoding off; + proxy_pass http://imageproxy-server/image/${var.public_assets_bucket}/; + if ( $args !~* ^imageView ){ + proxy_pass ${var.minio_host}; + } + + if ( $request_method = PUT ){ + proxy_pass ${var.minio_host}; + } + + } + location ~* ^/(minio) { + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + + proxy_connect_timeout 300; + proxy_http_version 1.1; + proxy_set_header Connection ""; + chunked_transfer_encoding off; + proxy_pass ${var.minio_host}; + } + + + location /oss { + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + + 
proxy_connect_timeout 300; + proxy_http_version 1.1; + proxy_set_header Connection ""; + chunked_transfer_encoding off; + proxy_pass ${var.minio_host}/${var.public_assets_bucket}/; + } + + location =/${var.public_assets_bucket} { + return 403; + } + EOT + + # Empty config, used to populate the config by default + "blank.config" = "" + + "openresty_extra_config.conf" = var.openresty_extra_config + + } +} + diff --git a/modules/app/openresty.secret.tf b/modules/app/openresty.secret.tf new file mode 100644 index 0000000..9ca00d7 --- /dev/null +++ b/modules/app/openresty.secret.tf @@ -0,0 +1,39 @@ +resource "kubernetes_secret" "openresty-ssl-certs" { + depends_on = [kubernetes_namespace.this] + metadata { + name = "openresty-ssl-certs" + namespace = var.namespace + annotations = { + "domain" = var.server_name + } + } + + data = { + "tls.crt" = var.tls_crt + "tls.key" = var.tls_key + } + + type = "IngressTLS" +} + +#extend certs for openresty +resource "kubernetes_secret" "openresty-extend-ssl-certs" { + count = var.has_extend_tls ? 1 : 0 + depends_on = [kubernetes_namespace.this] + metadata { + name = "openresty-extend-ssl-certs" + namespace = var.namespace + annotations = { + "domain" = var.extend_tls_data.tls_domain + } + } + + + data = { + "tls.crt" = var.extend_tls_data.tls_crt + "tls.key" = var.extend_tls_data.tls_key + } + + type = "IngressTLS" +} + diff --git a/modules/app/openresty.svc.tf b/modules/app/openresty.svc.tf new file mode 100644 index 0000000..a6d71ae --- /dev/null +++ b/modules/app/openresty.svc.tf @@ -0,0 +1,50 @@ + +resource "kubernetes_service" "openresty_server" { + count = var.has_openresty ? 1 : 0 + + lifecycle { + ignore_changes = [ + metadata[0].labels, + spec[0].port[0], + spec[0].port[1] + ] + } + + metadata { + name = "openresty-server" + namespace = var.namespace + + labels = { + # "service.beta.kubernetes.io/hash" = "d233bf00296726abd8c8fd741e86521efe44c7278d05368fdb56acb6" + } + + annotations = var.openresty_annotations + + # finalizers = ["service.k8s.alibaba/resources"] + } + + spec { + port { + name = "http-80" + protocol = "TCP" + port = 80 + target_port = "80" + } + + port { + name = "http-443" + protocol = "TCP" + port = 443 + target_port = "443" + } + + selector = { + app = "openresty" + } + + type = var.has_load_balancer ? "LoadBalancer" : "NodePort" + session_affinity = "None" + ip_families = ["IPv4"] + external_traffic_policy = var.has_load_balancer ? 
"Local" : null + } +} diff --git a/modules/app/openresty.vars.tf b/modules/app/openresty.vars.tf new file mode 100644 index 0000000..3ae80a2 --- /dev/null +++ b/modules/app/openresty.vars.tf @@ -0,0 +1,121 @@ +variable "job_admin_server_host" { + type = string + default = "job-admin-server" +} + + +variable "disallow_robots" { + type = bool + default = true + description = "Whether to disable crawlers" +} + +variable "default_server_host" { + type = string + default = "http://web-server" + description = "Default route processing service" +} + +variable "lbs_amap_secret" { + type = string + default = "" + description = "Gaode map reverse proxy security key" +} + +variable "minio_host" { + type = string + default = "http://minio.apitable-datacenter:9090" + description = "Object storage service address" +} + +variable "server_name" { + type = string + default = "vika.ltd" + description = "default domain name" +} + +variable "enable_ssl" { + type = bool + default = false + description = "Whether to enable ssl" +} + +variable "tls_name" { + type = string + default = "" + description = "tls cert name" +} + +variable "tls_crt" { + type = string + default = "" + description = "tls cert body" +} + +variable "tls_key" { + type = string + default = "" + description = "tls key body" +} + + + +variable "openresty_annotations" { + type = map(any) + description = "openresty annotation, used to control load balancing specifications, slb.s1.small(5k), slb.s3.small(20w) / slb.s3.large(100w)" + default = { + "service.beta.kubernetes.io/alibaba-cloud-loadbalancer-spec" = "slb.s1.small" + } +} + +variable "openresty_extra_config" { + type = string + default = "" + description = "nginx (openresty) external configuration file, which belongs to http internal level" +} + +variable "openresty_server_block" { + type = string + default = "" + description = "nginx (openresty) external configuration file, which belongs to the internal configuration of service" +} + +variable "worker_processes" { + default = "auto" + description = "nginx(openresty) worker_processes CPU core number, the corresponding CPU core number in the old version k8s" +} + + +variable "pricing_host" { + type = string + default = "http://pricing.apitable-mkt" + description = "pricing server" +} + +variable "openresty_index_block" { + type = string + default = "" + description = "Homepage URI =/, support nginx, lua code block" +} + +variable "developers_redirect_url" { + type = string + default = "" +} + + +variable "has_extend_tls" { + description = "Whether to support extended certificate" + type = bool + default = false +} + +variable "extend_tls_data" { + description = "Extended certificate crt and key contents" + type = map(any) + default = { + tls_crt = "" + tls_key = "" + tls_domain = "" + } +} \ No newline at end of file diff --git a/modules/app/outputs.tf b/modules/app/outputs.tf new file mode 100644 index 0000000..9f87735 --- /dev/null +++ b/modules/app/outputs.tf @@ -0,0 +1,9 @@ + +#Output ingress ip +output "ingress_ip" { + value = var.has_load_balancer ? kubernetes_service.openresty_server[0].status[0].load_balancer[0].ingress[0].ip : "" +} + +output "ingress_ip_alternative" { + value = var.has_load_balancer ? 
try(kubernetes_service.openresty_server[0].status[0].load_balancer[0].ingress[1].ip, "") : "" +} diff --git a/modules/app/room_server.tf b/modules/app/room_server.tf new file mode 100644 index 0000000..776dba5 --- /dev/null +++ b/modules/app/room_server.tf @@ -0,0 +1,233 @@ +locals { + default_room_server_resources = { + replicas = "2" + max_replicas = "20" + requests_cpu = "100m" + requests_memory = "512Mi" + limits_cpu = "1500m" + limits_memory = "8192Mi" + rolling_update_max_unavailable = "25%" + rolling_update_max_surge = "25%" + cpu_utilization_percentage = "500" + } +} + +locals { + room_server_resources = merge(local.default_room_server_resources, lookup(var.resources, "room_server", {})) +} + +resource "kubernetes_deployment" "room_server" { + wait_for_rollout = var.is_wait + count = var.has_room_server ? 1 : 0 + + depends_on = [ + kubernetes_deployment.backend_server, + kubernetes_namespace.this + ] + + metadata { + name = "room-server" + namespace = var.namespace + + labels = { + app = "room-server" + } + + } + + spec { + replicas = local.room_server_resources["replicas"] + + selector { + match_labels = { + app = "room-server" + } + } + + template { + metadata { + labels = { + app = "room-server" + } + + annotations = { + # redeploy-timestamp = "1655781881366" + "configmap.room-server-env/reload" = var.has_auto_reloaded_config_map ? sha1(jsonencode(kubernetes_config_map.room_server_env.data)) : "not_enabled" + } + } + + spec { + + node_selector = var.node_selector + + container { + name = "room-server" + image = "${var.registry}/${lookup(local.image_namespaces, "room_server")}/room-server:${lookup(local.image_tags, "room_server")}" + + env_from { + config_map_ref { + name = "room-server-env" + } + } + + resources { + requests = { + cpu = local.room_server_resources["requests_cpu"] + memory = local.room_server_resources["requests_memory"] //@add_tf_local + } + limits = { + cpu = local.room_server_resources["limits_cpu"] //@add_tf_local + memory = local.room_server_resources["limits_memory"] //@add_tf_local + } + } + + liveness_probe { + http_get { + path = "/actuator/health" + port = "3333" + scheme = "HTTP" + } + + initial_delay_seconds = 10 + timeout_seconds = 3 + period_seconds = 45 + success_threshold = 1 + failure_threshold = 3 + } + + readiness_probe { + http_get { + path = "/actuator/health" + port = "3333" + scheme = "HTTP" + } + + initial_delay_seconds = 10 + timeout_seconds = 3 + period_seconds = 15 + success_threshold = 1 + failure_threshold = 3 + } + + startup_probe { + http_get { + path = "/actuator/health" + port = "3333" + scheme = "HTTP" + } + + initial_delay_seconds = 10 + timeout_seconds = 1 + period_seconds = 10 + success_threshold = 1 + failure_threshold = 3 + } + + volume_mount { + name = "room-server-1658224800000" + mount_path = "/home/vikadata/packages/room-server/logs" + } + + volume_mount { + name = "room-server-1658224800000" + mount_path = "/app/packages/room-server/logs" + } + + termination_message_path = "/dev/termination-log" + termination_message_policy = "File" + image_pull_policy = var.image_pull_policy + } + + volume { + name = "room-server-1658224800000" + empty_dir {} + } + + image_pull_secrets { + name = "regcred" + } + restart_policy = "Always" + termination_grace_period_seconds = 30 + dns_policy = "ClusterFirst" + } + } + + strategy { + type = "RollingUpdate" + + rolling_update { + max_unavailable = local.room_server_resources["rolling_update_max_unavailable"] + max_surge = local.room_server_resources["rolling_update_max_surge"] + } + } + + 
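      # Editor's note (illustrative, not part of the upstream diff): the replica count and the
      # rolling-update percentages above come from local.room_server_resources, which merges
      # local.default_room_server_resources with lookup(var.resources, "room_server", {}).
      # A root module could therefore tune this deployment without editing the module itself,
      # e.g. with hypothetical values like:
      #
      #   module "app" {
      #     source = "./modules/app"   # assumed path
      #     resources = {
      #       room_server = {
      #         replicas                       = "3"
      #         max_replicas                   = "30"
      #         rolling_update_max_unavailable = "0%"
      #         rolling_update_max_surge       = "50%"
      #       }
      #     }
      #   }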
revision_history_limit = 10 + progress_deadline_seconds = 600 + } + + //Whether to ignore the change of image tag. + lifecycle { + ignore_changes = [ + spec[0].template[0].spec[0].affinity, + ] + } +} + +resource "kubernetes_service" "room_server" { + count = var.has_room_server ? 1 : 0 + metadata { + name = "room-server" + namespace = var.namespace + } + + spec { + port { + name = "room-server-3333-3333" + protocol = "TCP" + port = 3333 + target_port = "3333" + } + + port { + name = "room-server-3334-3334" + protocol = "TCP" + port = 3334 + target_port = "3334" + } + + port { + name = "room-server-3006-3006" + protocol = "TCP" + port = 3006 + target_port = "3006" + } + + selector = { + app = "room-server" + } + + type = "ClusterIP" + session_affinity = "None" + ip_families = ["IPv4"] + } +} + +resource "kubernetes_horizontal_pod_autoscaler" "room_server_autoscaler" { + count = var.has_room_server ? 1 : 0 + metadata { + name = "room-server-autoscaler-v2beta2" + namespace = var.namespace + } + + spec { + min_replicas = local.room_server_resources["replicas"] + max_replicas = local.room_server_resources["max_replicas"] + + scale_target_ref { + api_version = "apps/v1" + kind = "Deployment" + name = "room-server" + } + target_cpu_utilization_percentage = local.room_server_resources["cpu_utilization_percentage"] + } +} diff --git a/modules/app/room_server_env.tf b/modules/app/room_server_env.tf new file mode 100644 index 0000000..b7584e9 --- /dev/null +++ b/modules/app/room_server_env.tf @@ -0,0 +1,31 @@ +locals { + room_server_env = merge(local.env_config, { + WEB_SOCKET_CHANNEL_ENV = var.namespace + APPLICATION_NAME = "ROOM_SERVER" + SERVER_PORT = "3333" + INSTANCE_COUNT = "1" + LOG_LEVEL = "info" + BACKEND_BASE_URL = "http://backend-server:8081/api/v1/" + SOCKET_GRPC_URL = "socket-server:3007" + OSS_HOST = "/assets" + OSS_TYPE = "QNY1" + ZIPKIN_ENABLED = "false" + ROBOT_OFFICIAL_SERVICE_SLUG = "vika" + DEFAULT_LANGUAGE = "en-US" + }, lookup(var.envs, "room_server", {})) +} + +resource "kubernetes_config_map" "room_server_env" { + depends_on = [ + kubernetes_namespace.this + ] + metadata { + name = "room-server-env" + namespace = var.namespace + + annotations = { + } + } + + data = local.room_server_env +} diff --git a/modules/app/scheduler_server.app.tf b/modules/app/scheduler_server.app.tf new file mode 100644 index 0000000..a7ed60c --- /dev/null +++ b/modules/app/scheduler_server.app.tf @@ -0,0 +1,130 @@ +locals { + default_scheduler_server_resources = { + replicas = "1" + requests_cpu = "100m" + requests_memory = "1024Mi" + limits_cpu = "1500m" + limits_memory = "4096Mi" + rolling_update_max_unavailable = "0%" + rolling_update_max_surge = "25%" + } +} + +locals { + scheduler_server_resources = merge(local.default_scheduler_server_resources, lookup(var.resources, "scheduler_server", {})) +} + +resource "kubernetes_deployment" "scheduler_server" { + wait_for_rollout = var.is_wait + count = var.has_scheduler_server ? 1 : 0 + + depends_on = [ + kubernetes_namespace.this + ] + + metadata { + name = "scheduler-server" + namespace = var.namespace + + labels = { + app = "scheduler-server" + } + + } + + spec { + replicas = local.scheduler_server_resources["replicas"] + + selector { + match_labels = { + app = "scheduler-server" + } + } + + template { + metadata { + labels = { + app = "scheduler-server" + } + + annotations = { + # redeploy-timestamp = "1655781881366" + "configmap.scheduler-server-env/reload" = var.has_auto_reloaded_config_map ? 
sha1(jsonencode(kubernetes_config_map.scheduler_server_env.data)) : "not_enabled" + + } + } + + spec { + + node_selector = var.node_selector + + container { + name = "scheduler-server" + image = "${var.registry}/${lookup(local.image_namespaces, "room_server")}/room-server:${lookup(local.image_tags, "room_server")}" + + env_from { + config_map_ref { + name = "scheduler-server-env" + } + } + + resources { + requests = { + cpu = local.scheduler_server_resources["requests_cpu"] + memory = local.scheduler_server_resources["requests_memory"] //@add_tf_local + } + limits = { + cpu = local.scheduler_server_resources["limits_cpu"] //@add_tf_local + memory = local.scheduler_server_resources["limits_memory"] //@add_tf_local + } + } + + volume_mount { + name = "scheduler-server-1658224800000" + mount_path = "/home/vikadata/packages/room-server/logs" + } + + volume_mount { + name = "scheduler-server-1658224800000" + mount_path = "/app/packages/room-server/logs" + } + + termination_message_path = "/dev/termination-log" + termination_message_policy = "File" + image_pull_policy = var.image_pull_policy + } + + volume { + name = "scheduler-server-1658224800000" + empty_dir {} + } + + image_pull_secrets { + name = "regcred" + } + restart_policy = "Always" + termination_grace_period_seconds = 30 + dns_policy = "ClusterFirst" + } + } + + strategy { + type = "RollingUpdate" + + rolling_update { + max_unavailable = local.scheduler_server_resources["rolling_update_max_unavailable"] + max_surge = local.scheduler_server_resources["rolling_update_max_surge"] + } + } + + revision_history_limit = 10 + progress_deadline_seconds = 600 + } + + //Whether to ignore the change of image tag. + lifecycle { + ignore_changes = [ + spec[0].template[0].spec[0].affinity, + ] + } +} diff --git a/modules/app/scheduler_server.svc.tf b/modules/app/scheduler_server.svc.tf new file mode 100644 index 0000000..fcdfce3 --- /dev/null +++ b/modules/app/scheduler_server.svc.tf @@ -0,0 +1,30 @@ + +resource "kubernetes_service" "scheduler_server" { + count = var.has_scheduler_server ? 
1 : 0 + + depends_on = [ + kubernetes_namespace.this + ] + + metadata { + name = "scheduler-server" + namespace = var.namespace + } + + spec { + port { + name = "scheduler-server-3333-3333" + protocol = "TCP" + port = 3333 + target_port = "3333" + } + + selector = { + app = "scheduler-server" + } + + type = "ClusterIP" + session_affinity = "None" + ip_families = ["IPv4"] + } +} diff --git a/modules/app/scheduler_server_env.tf b/modules/app/scheduler_server_env.tf new file mode 100644 index 0000000..6bae39a --- /dev/null +++ b/modules/app/scheduler_server_env.tf @@ -0,0 +1,27 @@ +locals { + scheduler_server_env = merge(local.env_config, { + ENABLE_SCHED = "true" + APPLICATION_NAME = "SCHEDULE_SERVER" + INSTANCE_COUNT = "1" + LOG_LEVEL = "info" + BACKEND_BASE_URL = "http://backend-server:8081/api/v1/" + SOCKET_GRPC_URL = "socket-server:3007" + OSS_TYPE = "QNY1" + ZIPKIN_ENABLED = "false" + }, lookup(var.envs, "scheduler_server", {})) +} + +resource "kubernetes_config_map" "scheduler_server_env" { + depends_on = [ + kubernetes_namespace.this + ] + metadata { + name = "scheduler-server-env" + namespace = var.namespace + + annotations = { + } + } + + data = local.scheduler_server_env +} diff --git a/modules/app/socket_server.tf b/modules/app/socket_server.tf new file mode 100644 index 0000000..4c279fb --- /dev/null +++ b/modules/app/socket_server.tf @@ -0,0 +1,218 @@ +locals { + default_socket_server_resources = { + replicas = "1" + requests_cpu = "100m" + requests_memory = "512Mi" + limits_cpu = "1000m" + limits_memory = "2048Mi" + rolling_update_max_unavailable = "25%" + rolling_update_max_surge = "25%" + } +} + +locals { + socket_server_resources = merge(local.default_socket_server_resources, lookup(var.resources, "socket_server", {})) +} + +resource "kubernetes_deployment" "socket_server" { + wait_for_rollout = var.is_wait + count = var.has_socket_server ? 1 : 0 + depends_on = [ + kubernetes_namespace.this + ] + metadata { + name = "socket-server" + namespace = var.namespace + + labels = { + app = "socket-server" + } + + } + + spec { + replicas = local.socket_server_resources["replicas"] + + selector { + match_labels = { + app = "socket-server" + } + } + + template { + metadata { + labels = { + app = "socket-server" + } + + annotations = { + # redeploy-timestamp = "1655781881366" + "configmap.socket-server-env/reload" = var.has_auto_reloaded_config_map ? 
sha1(jsonencode(kubernetes_config_map.socket_server_env.data)) : "not_enabled" + } + } + + spec { + + node_selector = var.node_selector + + container { + name = "socket-server" + image = "${var.registry}/${lookup(local.image_namespaces, "room_server")}/room-server:${lookup(local.image_tags, "socket_server")}" + image_pull_policy = var.image_pull_policy + + env_from { + config_map_ref { + name = "socket-server-env" + } + } + + resources { + requests = { + cpu = local.socket_server_resources["requests_cpu"] + memory = local.socket_server_resources["requests_memory"] //@add_tf_local + } + limits = { + cpu = local.socket_server_resources["limits_cpu"] //@add_tf_local + memory = local.socket_server_resources["limits_memory"] //@add_tf_local + } + } + + liveness_probe { + http_get { + path = "/socket/health" + port = "3001" + scheme = "HTTP" + } + + initial_delay_seconds = 10 + timeout_seconds = 3 + period_seconds = 30 + success_threshold = 1 + failure_threshold = 3 + } + + readiness_probe { + http_get { + path = "/socket/health" + port = "3001" + scheme = "HTTP" + } + + initial_delay_seconds = 10 + timeout_seconds = 3 + period_seconds = 15 + success_threshold = 1 + failure_threshold = 3 + } + + startup_probe { + http_get { + path = "/socket/health" + port = "3001" + scheme = "HTTP" + } + + initial_delay_seconds = 10 + timeout_seconds = 1 + period_seconds = 10 + success_threshold = 1 + failure_threshold = 3 + } + + volume_mount { + name = "socket-server-1658224800000" + mount_path = "/app/packages/socket-server/logs" + } + + volume_mount { + name = "socket-server-1658224800000" + mount_path = "/app/packages/room-server/logs" + } + + termination_message_path = "/dev/termination-log" + termination_message_policy = "File" + } + + volume { + name = "socket-server-1658224800000" + empty_dir {} + } + + image_pull_secrets { + name = "regcred" + } + restart_policy = "Always" + termination_grace_period_seconds = 30 + dns_policy = "ClusterFirst" + } + } + + strategy { + type = "RollingUpdate" + + rolling_update { + max_unavailable = local.socket_server_resources["rolling_update_max_unavailable"] + max_surge = local.socket_server_resources["rolling_update_max_surge"] + } + } + + revision_history_limit = 10 + progress_deadline_seconds = 600 + } + + //Whether to ignore the change of image tag. + lifecycle { + ignore_changes = [ + spec[0].template[0].spec[0].affinity, + ] + } +} + +resource "kubernetes_service" "socket_server" { + count = var.has_socket_server ? 
1 : 0 + depends_on = [ + kubernetes_namespace.this + ] + metadata { + name = "socket-server" + namespace = var.namespace + } + + spec { + port { + name = "socket-server-3001-3001" + protocol = "TCP" + port = 3001 + target_port = "3001" + } + + port { + name = "socket-server-3002-3002" + protocol = "TCP" + port = 3002 + target_port = "3002" + } + + port { + name = "socket-server-3005-3005" + protocol = "TCP" + port = 3005 + target_port = "3005" + } + + port { + name = "socket-server-3007-3007" + protocol = "TCP" + port = 3007 + target_port = "3007" + } + + selector = { + app = "socket-server" + } + + type = "ClusterIP" + session_affinity = "None" + ip_families = ["IPv4"] + } +} diff --git a/modules/app/socket_server_env.tf b/modules/app/socket_server_env.tf new file mode 100644 index 0000000..4a0f3e5 --- /dev/null +++ b/modules/app/socket_server_env.tf @@ -0,0 +1,38 @@ +locals { + socket_server_env = merge(local.env_config, { + WEB_SOCKET_CHANNEL_ENV = var.namespace + APPLICATION_NAME = "SOCKET_SERVER" + ENABLE_SOCKET = "true" + SERVER_PORT = "3001" + + INSTANCE_COUNT = "1" + LOG_LEVEL = "info" + + NEST_HEALTH_CHECK_CRON_EXPRESSION = "*/3 * * * * *" + NEST_HEALTH_CHECK_TIMEOUT = "1000" + #Deprecate: NEST_GRPC_URL => ROOM_GRPC_URL + NEST_GRPC_URL = "room-server:3334" + ROOM_GRPC_URL = "room-server:3334" + #Deprecate: BACKEND_GRPC_URL => SOCKET_BACKEND_GRPC_URL + BACKEND_GRPC_URL = "backend-server:8083" + SOCKET_BACKEND_GRPC_URL = "backend-server:8083" + GRPC_TIMEOUT_MAX_TIMES = "3" + NODE_MEMORY_RATIO = "80" + + }, lookup(var.envs, "socket_server", {})) +} + +resource "kubernetes_config_map" "socket_server_env" { + depends_on = [ + kubernetes_namespace.this + ] + metadata { + name = "socket-server-env" + namespace = var.namespace + + annotations = { + } + } + + data = local.socket_server_env +} diff --git a/modules/app/variables.tf b/modules/app/variables.tf new file mode 100644 index 0000000..0bf3c75 --- /dev/null +++ b/modules/app/variables.tf @@ -0,0 +1,252 @@ +variable "namespace" { + type = string + default = "apitable-app" + description = "Shared namespace for applications" +} + +variable "create_ns" { + default = true + type = bool + description = "Whether to automatically create namespace" +} + +variable "has_load_balancer" { + description = "Does it come with Load Balancer? OpenResty exposes IP if any" + type = bool + default = false +} + + +variable "has_cron_job" { + description = "Whether it has a timed task job?" + type = bool + default = true +} + +variable "has_openresty" { + description = "Does it come with an openresty gateway? With public IP and load balancing" + type = bool + default = true +} + +variable "has_openresty_ofelia_job" { + description = "whether to bring a lightweight OfeliaJob Container?" + type = bool + default = false +} + +variable "has_backend_server" { + description = "Whether to deploy Java-Api service?" + type = bool + default = true +} + +variable "has_sensors_filebeat" { + default = true + type = bool + description = "Whether to enable Sensors data collection" +} + +variable "has_job_admin_server" { + description = "Whether to deploy XXL-JOB-Admin service?" + type = bool + default = false +} + +variable "has_space_job_executor" { + description = "Whether to deploy XXL-JO-workbench task executor service?" + type = bool + default = false +} + +variable "has_bill_job_executor" { + description = "Whether to deploy XXL-JOB-subscription task executor service?" 
+ type = bool + default = false +} + +variable "has_room_server" { + description = "Whether to deploy the Node-Nest.js-Room-Server service?" + type = bool + default = true +} + +variable "has_nest_rest_server" { + description = "/dataPack API only, would be removed after publishing Galaxy version" + type = bool + default = false +} + +variable "has_fusion_server" { + description = "Whether to deploy the Node-Nest.js-Fusion-Api-Server service?" + type = bool + default = true +} + +variable "has_scheduler_server" { + description = "Whether to deploy the Node-Nest.js-Scheduler-Server service?" + type = bool + default = true +} + +variable "has_socket_server" { + description = "Whether to deploy the Node-Nest.js-Socket-Server service?" + type = bool + default = true +} + +variable "has_document_server" { + description = "Whether to deploy the Node-Nest.js-Document-Server service?" + type = bool + default = false +} + +variable "has_web_server" { + description = "Whether to deploy Web-Server (front-end template) service?" + type = bool + default = true +} + +variable "has_migration_server" { + description = "Whether to deploy Java-Data Migration Service?" + type = bool + default = false +} + +variable "has_imageproxy_server" { + description = "Whether to deploy the Go image clipping service?" + type = bool + default = false +} + +variable "has_dingtalk_server" { + description = "Whether to deploy DingTalk application integration service?" + type = bool + default = false +} + +variable "has_ai_server" { + description = "Whether to deploy AI server?" + type = bool + default = false +} + +variable "has_databus_server" { + description = "Deploy the databus-server?" + type = bool + default = true +} + +variable "publish_databus_server" { + description = "Publish the databus-server ?" + type = bool + default = true +} + +variable "has_auto_reloaded_config_map" { + description = "Modify the configMap whether to automatically restart pods?" + type = bool + default = false +} + +variable "registry" { + description = "The dockerHub, the default is ghcr.io of github, the Vika accelerator is ghcr.vikadata.com, and the private warehouse is docker.vika.ltd" + type = string + default = "ghcr.io" +} + +variable "image_tag" { + description = "What version of the container image tag to use when initializing" + type = string + default = "latest-alpha" +} + +variable "image_tags" { + description = "During initialization, you can freely control different container services, which tags are used respectively, and if any, overwrite image_tag. It is recommended that convention is better than configuration. Make corresponding branches in each project, and use the last image_tag variable for global unification instead of configuring here. In addition, it should be noted that the variables here are all underscored, such as the container service backend-server, the variables here correspond to backend_server, and match the terraform variable naming practice" + type = map(any) + default = {} +} + +variable "image_namespace" { + description = "What namespace container image to use when initializing" + type = string + default = "vikadata/vika" +} + +variable "image_namespaces" { + description = "During initialization, you can freely control different container services, which namespaces are used respectively, and if any, overwrite image_namespace. 
It is recommended that convention is better than configuration, and corresponding branches should be made in each project" + type = map(any) + default = {} +} + +variable "env" { + description = "environment variable" + type = map(any) + default = {} +} + +variable "envs" { + description = "Environment variables, submodules replace .env" + #type = map(any) + default = { + } +} + +variable "resources" { + description = "How many resources are used for different services? Including copy, CPU, and memory, the unit is MB. limit is the modified value × 2, and each environment has the default value of the minimum unit to start" + type = any + default = { + } +} + +variable "image_pull_policy" { + type = string + default = "IfNotPresent" +} + +variable "is_wait" { + type = bool + default = true +} + +variable "public_assets_bucket" { + type = string + default = "vk-assets-ltd" +} + +variable "default_server_host_override_proxy_host" { + type = string + default = "" +} + +## Deprecate +variable "docker_edition" { + type = string + default = "vika" +} + +variable "node_selector" { + default = { + } + description = "Node node label selector" +} + +variable "ai_server_sc" { + default = { + size = "10Pi" + volume_attributes = { + subPath = "ai_server" + } + } + description = "ai_server storage class" +} + +variable "pv_csi" { + default = { + namespace = "vika-opsbase" + driver = "csi.juicefs.com" + fs_type = "juicefs" + node_publish_secret_ref = "juicefs-sc-secret" + } + description = "csi storage namespace" +} \ No newline at end of file diff --git a/modules/app/versions.tf b/modules/app/versions.tf new file mode 100644 index 0000000..e8f6db1 --- /dev/null +++ b/modules/app/versions.tf @@ -0,0 +1,24 @@ +terraform { + required_version = ">= 1.0" + + required_providers { + kubernetes = { + source = "hashicorp/kubernetes" + version = ">= 2.16.1" + } + } +} + +resource "kubernetes_namespace" "this" { + count = var.create_ns ? 1 : 0 + metadata { + name = var.namespace + } +} + +data "kubernetes_namespace" "this" { + count = var.create_ns ? 0 : 1 + metadata { + name = var.namespace + } +} diff --git a/modules/app/web_server.app.tf b/modules/app/web_server.app.tf new file mode 100644 index 0000000..76b62e8 --- /dev/null +++ b/modules/app/web_server.app.tf @@ -0,0 +1,177 @@ +resource "kubernetes_deployment" "web_server" { + wait_for_rollout = var.is_wait + count = var.has_web_server ? 1 : 0 + depends_on = [ + kubernetes_namespace.this + ] + metadata { + name = "web-server" + namespace = var.namespace + + labels = { + app = "web-server" + } + + } + + spec { + replicas = local.web_server_resources["replicas"] + + selector { + match_labels = { + app = "web-server" + } + } + + template { + metadata { + labels = { + app = "web-server" + } + + annotations = { + # redeploy-timestamp = "1655781881366" + "configmap.web-server-env/reload" = var.has_auto_reloaded_config_map ? 
sha1(jsonencode(kubernetes_config_map.web_server_env.data)) : "not_enabled" + } + } + + spec { + + node_selector = var.node_selector + + init_container { + name = "init-settings" + image = "${var.registry}/${lookup(local.image_namespaces, "init_settings")}/init-settings:${lookup(local.image_tags, "init_settings")}" + image_pull_policy = var.image_pull_policy + command = [ + "sh", "-c", + "[ -d /settings ] && cp -afr /settings/* /tmp/vika" + ] + security_context { + allow_privilege_escalation = false + run_as_user = "0" + } + volume_mount { + mount_path = "/tmp/vika" + name = "settings" + sub_path = "settings" + } + } + + volume { + name = "settings" + empty_dir {} + } + + container { + name = "web-server" + image = "${var.registry}/${lookup(local.image_namespaces, "web_server")}/web-server:${lookup(local.image_tags, "web_server")}" + + env_from { + config_map_ref { + name = "web-server-env" + } + } + + volume_mount { + mount_path = "/tmp/vika" + name = "settings" + sub_path = "settings" + } + + resources { + requests = { + cpu = local.web_server_resources["requests_cpu"] //@add_tf_local + memory = local.web_server_resources["requests_memory"] //@add_tf_local + } + limits = { + cpu = local.web_server_resources["limits_cpu"] //@add_tf_local + memory = local.web_server_resources["limits_memory"] //@add_tf_local + } + } + + # Detect whether the application is healthy, delete and restart the container if it is unhealthy + liveness_probe { + http_get { + path = "/api/actuator/health" + port = "8080" + scheme = "HTTP" + } + + initial_delay_seconds = 10 + timeout_seconds = 3 + period_seconds = 45 + success_threshold = 1 + failure_threshold = 3 + } + + # Detect whether the application is ready and in normal service state, if it is not normal, it will not receive traffic from Kubernetes Service + readiness_probe { + http_get { + path = "/api/actuator/health" + port = "8080" + scheme = "HTTP" + } + + initial_delay_seconds = 10 + timeout_seconds = 3 + period_seconds = 15 + success_threshold = 1 + failure_threshold = 3 + } + + # Detect whether the application is started. If it is not ready within the failureThreshold*periodSeconds period, the application process will be restarted + startup_probe { + http_get { + path = "/api/actuator/health" + port = "8080" + scheme = "HTTP" + } + + initial_delay_seconds = 10 + timeout_seconds = 1 + period_seconds = 10 + success_threshold = 1 + failure_threshold = 3 + } + + command = local.web_server_resources["command"] + + security_context { + allow_privilege_escalation = false + run_as_user = "0" + } + + termination_message_path = "/dev/termination-log" + termination_message_policy = "File" + image_pull_policy = var.image_pull_policy + } + + image_pull_secrets { + name = "regcred" + } + restart_policy = "Always" + termination_grace_period_seconds = 30 + dns_policy = "ClusterFirst" + } + } + + strategy { + type = "RollingUpdate" + + rolling_update { + max_unavailable = local.web_server_resources["rolling_update_max_unavailable"] + max_surge = local.web_server_resources["rolling_update_max_surge"] + } + } + + revision_history_limit = 10 + progress_deadline_seconds = 600 + } + //Whether to ignore the change of image tag. 
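  //Editor's note (clarification, not part of the upstream diff): as written, the ignore_changes
  //list below only covers spec[0].template[0].spec[0].affinity (commonly mutated by the scheduler
  //or a cloud controller), so the container image is still managed by Terraform. If image tags
  //were updated out-of-band (e.g. by CI), one would additionally ignore something like:
  //  spec[0].template[0].spec[0].container[0].image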
+ + lifecycle { + ignore_changes = [ + spec[0].template[0].spec[0].affinity, + ] + } +} diff --git a/modules/app/web_server.env.tf b/modules/app/web_server.env.tf new file mode 100644 index 0000000..89b996c --- /dev/null +++ b/modules/app/web_server.env.tf @@ -0,0 +1,52 @@ +locals { + web_server_env = merge(local.env_config, { + + API_PROXY = "http://backend-server:8081" + BACKEND_INFO_URL = "http://backend-server:8081/api/v1/client/info" + TEMPLATE_PATH = "./static/web_build/index.html" + WEB_SERVER_PORT = "8080" + + + }, lookup(var.envs, "web_server", {})) +} + +resource "kubernetes_config_map" "web_server_env" { + depends_on = [ + kubernetes_namespace.this + ] + metadata { + name = "web-server-env" + namespace = var.namespace + + annotations = { + } + } + + data = local.web_server_env +} + +// Declares the minimum resources required to start the service. +locals { + default_web_server_resources = { + replicas = "1" + // ⚠️ Note: requests are the resources guaranteed to the container, limits are the upper bound; 200m = 0.2 CPU, 1000m = 1 CPU, 1 = 1 CPU, 0.1 = 100m + // see: https://kubesphere.io/zh/blogs/deep-dive-into-the-k8s-request-and-limit + requests_cpu = "200m" + // Units: Mi, Gi + requests_memory = "512Mi" + limits_cpu = "1000m" + limits_memory = "2048Mi" + rolling_update_max_unavailable = "0%" + rolling_update_max_surge = "25%" + // Override the startup command: run the injected settings script (if present), then start the server + command = [ + "/bin/sh", "-c", + "[ -f /tmp/vika/run.sh ] && sh /tmp/vika/run.sh ; node server.js" + ] + } +} +// Merge resource parameters declared for the individual namespace (environment) +locals { + web_server_resources = merge(local.default_web_server_resources, lookup(var.resources, "web_server", {})) +} diff --git a/modules/app/web_server.svc.tf b/modules/app/web_server.svc.tf new file mode 100644 index 0000000..479561b --- /dev/null +++ b/modules/app/web_server.svc.tf @@ -0,0 +1,27 @@ + +resource "kubernetes_service" "web_server" { + count = var.has_web_server ? 1 : 0 + depends_on = [ + kubernetes_namespace.this + ] + metadata { + name = "web-server" + namespace = var.namespace + } + + spec { + port { + protocol = "TCP" + port = 8080 + target_port = "8080" + } + + selector = { + app = "web-server" + } + + type = "ClusterIP" + session_affinity = "None" + ip_families = ["IPv4"] + } +} \ No newline at end of file diff --git a/modules/datacenter/README.md b/modules/datacenter/README.md new file mode 100644 index 0000000..336480d --- /dev/null +++ b/modules/datacenter/README.md @@ -0,0 +1,64 @@ + + +## Requirements + +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 1.0 | +| [helm](#requirement\_helm) | >= 2.8.0 | +| [kubernetes](#requirement\_kubernetes) | >= 2.16.1 | + +## Providers + +| Name | Version | +|------|---------| +| [helm](#provider\_helm) | >= 2.8.0 | +| [kubernetes](#provider\_kubernetes) | >= 2.16.1 | +## Modules + +No modules.
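## Usage

A minimal invocation sketch, added here for orientation only: the module path, the enabled services, and every value shown are illustrative assumptions rather than settings taken from this repository. The full list of inputs and their defaults is in the Inputs table below.

```hcl
module "datacenter" {
  # Assumed relative path; adjust to where this module lives in your configuration.
  source = "../../modules/datacenter"

  namespace = "apitable-datacenter"
  create_ns = true

  # Toggle the bundled middleware; only MySQL is enabled by default.
  has_mysql    = true
  has_redis    = true
  has_rabbitmq = false
  has_minio    = false

  mysql_init_database         = "apitable"
  mysql_default_root_password = "change-me" # placeholder, not a real credential

  # Optionally override the chart repository for individual services.
  chart_repositorys = {
    mysql = "https://charts.bitnami.com/bitnami"
  }
}
```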
+ +## Resources + +| Name | Type | +|------|------| +| [helm_release.minio](https://registry.terraform.io/providers/hashicorp/helm/latest/docs/resources/release) | resource | +| [helm_release.mysql](https://registry.terraform.io/providers/hashicorp/helm/latest/docs/resources/release) | resource | +| [helm_release.rabbitmq](https://registry.terraform.io/providers/hashicorp/helm/latest/docs/resources/release) | resource | +| [helm_release.redis](https://registry.terraform.io/providers/hashicorp/helm/latest/docs/resources/release) | resource | +| [kubernetes_namespace.this](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/namespace) | resource | +| [kubernetes_namespace.this](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/data-sources/namespace) | data source | + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| [chart\_registry](#input\_chart\_registry) | Default chart registry address | `string` | `"docker.io"` | no | +| [chart\_registrys](#input\_chart\_registrys) | Per-service registry overrides applied during initialization; any entry here overrides the default registry for that service. Convention over configuration is recommended. | `map` | `{}` | no | +| [chart\_repository](#input\_chart\_repository) | Default Helm chart repository address | `string` | `"https://charts.bitnami.com/bitnami"` | no | +| [chart\_repositorys](#input\_chart\_repositorys) | Per-service Helm repository overrides applied during initialization; any entry here overrides the default repository for that service. Convention over configuration is recommended. | `map` | `{}` | no | +| [create\_ns](#input\_create\_ns) | Whether to automatically create the namespace | `bool` | `true` | no | +| [default\_storage\_class\_name](#input\_default\_storage\_class\_name) | High-performance storage class name used to automatically generate PVCs; defaults to Alibaba Cloud parameters | `string` | `"alicloud-disk-efficiency"` | no | +| [default\_storage\_size](#input\_default\_storage\_size) | High-performance storage volume size; the Alibaba Cloud minimum is 20Gi | `string` | `"20Gi"` | no | +| [has\_filebeat](#input\_has\_filebeat) | n/a | `bool` | `false` | no | +| [has\_minio](#input\_has\_minio) | n/a | `bool` | `false` | no | +| [has\_mysql](#input\_has\_mysql) | n/a | `bool` | `true` | no | +| [has\_rabbitmq](#input\_has\_rabbitmq) | n/a | `bool` | `false` | no | +| [has\_redis](#input\_has\_redis) | n/a | `bool` | `false` | no | +| [minio\_default\_password](#input\_minio\_default\_password) | n/a | `string` | `"73VyYWygp7VakhRC6hTf"` | no | +| [minio\_resources](#input\_minio\_resources) | Resource configuration | `map` | `{ "limits": {}, "requests": {} }` | no | +| [mysql\_default\_root\_password](#input\_mysql\_default\_root\_password) | n/a | `string` | `"6sg8vgDFcwWXP386EiZB"` | no | +| [mysql\_init\_database](#input\_mysql\_init\_database) | n/a | `string` | `"apitable"` | no | +| [mysql\_init\_disk\_size](#input\_mysql\_init\_disk\_size) | n/a | `string` | `"20Gi"` | no | +| [namespace](#input\_namespace) | Note: the namespace is usually created while the cloud storage PVC is created; it is only referenced here, not created | `string` | `"apitable-datacenter"` | no | +| [rabbitmq\_default\_password](#input\_rabbitmq\_default\_password) | n/a | `string` | `"7r4HVvsrwP4kQjAgj8Jj"` | no | +| [rabbitmq\_default\_user](#input\_rabbitmq\_default\_user) | n/a | `string` | `"user"` | no | +| [rabbitmq\_resources](#input\_rabbitmq\_resources) | Resource limits | `map` | `{ "limits": {}, "requests": {} }` | no | +| [redis\_default\_password](#input\_redis\_default\_password) | n/a | `string` | `"UHWCWiuUMVyupqmW4cXV"` | no | +| [redis\_disk\_size](#input\_redis\_disk\_size) | n/a | `string` | `"20Gi"` | no | + +## Outputs + +No outputs. + \ No newline at end of file
diff --git a/modules/datacenter/config_chart_repositorys.tf b/modules/datacenter/config_chart_repositorys.tf new file mode 100644 index 0000000..855dc58 --- /dev/null +++ b/modules/datacenter/config_chart_repositorys.tf @@ -0,0 +1,32 @@ + +locals { + chart_repositrys = merge({ + mysql = var.chart_repository + elasticsearch = var.chart_repository + filebeat = var.chart_repository + kafka = var.chart_repository + kibana = var.chart_repository + minio = var.chart_repository + mongodb = var.chart_repository + postgresql = var.chart_repository + rabbitmq = var.chart_repository + redis = var.chart_repository + zookeeper = var.chart_repository + }, var.chart_repositorys) +} + +locals { + chart_registrys = merge({ + mysql = var.chart_registry + elasticsearch = var.chart_registry + filebeat = var.chart_registry + kafka = var.chart_registry + kibana = var.chart_registry + minio = var.chart_registry + mongodb = var.chart_registry + postgresql = var.chart_registry + rabbitmq = var.chart_registry + redis = var.chart_registry + zookeeper = var.chart_registry + }, var.chart_registrys) +} \ No newline at end of file
diff --git a/modules/datacenter/minio.helm.tf b/modules/datacenter/minio.helm.tf new file mode 100644 index 0000000..26cbede --- /dev/null +++ b/modules/datacenter/minio.helm.tf @@ -0,0 +1,26 @@ +# https://github.com/bitnami/charts/tree/master/bitnami/minio + + +resource "helm_release" "minio" { + + count = var.has_minio ? 1 : 0 + + name = "minio" + repository = lookup(local.chart_repositrys, "minio") + chart = "minio" + namespace = var.namespace + values = [ + <