diff --git a/.flake8 b/.flake8
new file mode 100644
index 000000000..cc285f26f
--- /dev/null
+++ b/.flake8
@@ -0,0 +1,8 @@
+[flake8]
+max-line-length = 125
+
+# Style issues are handled by yapf formatting
+ignore =
+    E126,  # continuation line over-indented for hanging indent
+    W503,  # line break before binary operator
+    E251,  # unexpected spaces around keyword / parameter equals
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 000000000..957757d66
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,16 @@
+*.pyc
+*.pyo
+*~
+/.cache
+/.coverage
+/.project
+/.pydevproject
+/.pytest_cache/
+__pycache__/
+/build/
+/dist/
+/karapace.egg-info/
+/karapace-rpm-src.tar
+/rpm/
+/kafka_*.tgz
+/kafka_*/
diff --git a/.pylintrc b/.pylintrc
new file mode 100644
index 000000000..b3062fb4e
--- /dev/null
+++ b/.pylintrc
@@ -0,0 +1,27 @@
+[MASTER]
+jobs=4
+
+[MESSAGES CONTROL]
+disable=
+    bad-continuation,
+    fixme,
+    invalid-name,
+    missing-docstring,
+    too-few-public-methods,
+    too-many-arguments,
+    too-many-branches,
+    too-many-instance-attributes,
+    too-many-locals,
+    too-many-nested-blocks,
+    too-many-public-methods,
+    too-many-statements,
+    wrong-import-order,
+
+[FORMAT]
+max-line-length=125
+
+[REPORTS]
+output-format=text
+reports=no
+score=no
diff --git a/.style.yapf b/.style.yapf
new file mode 100644
index 000000000..0805f8f4d
--- /dev/null
+++ b/.style.yapf
@@ -0,0 +1,55 @@
+[style]
+# For docs, see https://github.com/google/yapf/blob/master/README.rst
+
+based_on_style = pep8
+# Disallow splitting between dict key and dict value in multiline {"key": "value"} lines
+ALLOW_SPLIT_BEFORE_DICT_VALUE = false
+
+# Avoid adding unnecessary blank lines when nesting
+BLANK_LINE_BEFORE_NESTED_CLASS_OR_DEF = false
+
+# Always add two blank lines for top-level classes and methods
+BLANK_LINES_AROUND_TOP_LEVEL_DEFINITION = 2
+
+# These two combine consecutive brackets like ({ and }) onto the same line to reduce clutter
+COALESCE_BRACKETS = true
+DEDENT_CLOSING_BRACKETS = true
+
+# Line length
+COLUMN_LIMIT = 125
+
+# Try to avoid overly long lines by assigning an excessively large penalty to them.
+SPLIT_PENALTY_EXCESS_CHARACTER = 1000000000
+
+# Always split dict entries to one entry per line
+# EACH_DICT_ENTRY_ON_SEPARATE_LINE = true
+
+# Never split this comment to a separate line. Workaround for certain flake8 & email template lines
+I18N_COMMENT = # noqa
+
+# Allow automatically joining lines, for example a multiline "if" that would fit on a single line
+JOIN_MULTIPLE_LINES = true
+
+# "3 * 5", instead of "3*5"
+SPACES_AROUND_POWER_OPERATOR = true
+
+# Follow normal comment style by adding two spaces between code and comment
+SPACES_BEFORE_COMMENT = 2
+
+# If list of items is comma terminated, always split to one per line.
+SPLIT_ARGUMENTS_WHEN_COMMA_TERMINATED = true
+
+# Related to previous one, if list of items (args or dict/list/...) needs to be split, split to one per line.
+# SPLIT_ALL_COMMA_SEPARATED_VALUES = true
+
+# Split dict generators for clarity (add line breaks between { and key: val, etc.)
+SPLIT_BEFORE_DICT_SET_GENERATOR = true
+
+# Split method(k1=v1, k2=v2...) to separate lines
+SPLIT_BEFORE_NAMED_ASSIGNS = true
+
+# For complex (for some definition of complex) comprehensions, put output, for and if to separate lines
+SPLIT_COMPLEX_COMPREHENSION = true
+
+# When splitting something to multiple lines ('method(\n val...'), indent by 4
+CONTINUATION_INDENT_WIDTH = 4
diff --git a/.travis.yml b/.travis.yml
new file mode 100644
index 000000000..a7216af43
--- /dev/null
+++ b/.travis.yml
@@ -0,0 +1,20 @@
+language: python
+
+python:
+  - "3.5"
+  - "3.6"
+
+# TODO: remove these overrides once default dist supports python 3.7
+matrix:
+  include:
+    - python: 3.7
+      dist: xenial
+      sudo: true
+
+install:
+  - "pip install astroid==2.0.0 flake8 pylint pytest isort yapf requests kafka-python aiohttp avro-python3 aiosocksy"
+
+script:
+  - "make pylint"
+  - "make flake8"
+  - "make unittest"
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 000000000..be3f7b28e
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,661 @@
+ GNU AFFERO GENERAL PUBLIC LICENSE
+ Version 3, 19 November 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc.
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The GNU Affero General Public License is a free, copyleft license for
+software and other kinds of works, specifically designed to ensure
+cooperation with the community in the case of network server software.
+
+ The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works. By contrast,
+our General Public Licenses are intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.
+
+ Developers that use our General Public Licenses protect your rights
+with two steps: (1) assert copyright on the software, and (2) offer
+you this License which gives you legal permission to copy, distribute
+and/or modify the software.
+
+ A secondary benefit of defending all users' freedom is that
+improvements made in alternate versions of the program, if they
+receive widespread use, become available for other developers to
+incorporate. Many developers of free software are heartened and
+encouraged by the resulting cooperation. However, in the case of
+software used on network servers, this result may fail to come about.
+The GNU General Public License permits making a modified version and
+letting the public access it on a server without ever releasing its
+source code to the public.
+
+ The GNU Affero General Public License is designed specifically to
+ensure that, in such cases, the modified source code becomes available
+to the community. It requires the operator of a network server to
+provide the source code of the modified version running there to the
+users of that server. Therefore, public use of a modified version, on
+a publicly accessible server, gives the public access to the source
+code of the modified version.
+
+ An older license, called the Affero General Public License and
+published by Affero, was designed to accomplish similar goals. This is
+a different license, not a version of the Affero GPL, but Affero has
+released a new version of the Affero GPL which permits relicensing under
+this license.
+
+ The precise terms and conditions for copying, distribution and
+modification follow.
+
+ TERMS AND CONDITIONS
+
+ 0. Definitions.
+
+ "This License" refers to version 3 of the GNU Affero General Public License.
+
+ "Copyright" also means copyright-like laws that apply to other kinds of
+works, such as semiconductor masks.
+
+ "The Program" refers to any copyrightable work licensed under this
+License. Each licensee is addressed as "you". "Licensees" and
+"recipients" may be individuals or organizations.
+
+ To "modify" a work means to copy from or adapt all or part of the work
+in a fashion requiring copyright permission, other than the making of an
+exact copy. The resulting work is called a "modified version" of the
+earlier work or a work "based on" the earlier work.
+
+ A "covered work" means either the unmodified Program or a work based
+on the Program.
+
+ To "propagate" a work means to do anything with it that, without
+permission, would make you directly or secondarily liable for
+infringement under applicable copyright law, except executing it on a
+computer or modifying a private copy. Propagation includes copying,
+distribution (with or without modification), making available to the
+public, and in some countries other activities as well.
+
+ To "convey" a work means any kind of propagation that enables other
+parties to make or receive copies. Mere interaction with a user through
+a computer network, with no transfer of a copy, is not conveying.
+
+ An interactive user interface displays "Appropriate Legal Notices"
+to the extent that it includes a convenient and prominently visible
+feature that (1) displays an appropriate copyright notice, and (2)
+tells the user that there is no warranty for the work (except to the
+extent that warranties are provided), that licensees may convey the
+work under this License, and how to view a copy of this License. If
+the interface presents a list of user commands or options, such as a
+menu, a prominent item in the list meets this criterion.
+
+ 1. Source Code.
+
+ The "source code" for a work means the preferred form of the work
+for making modifications to it. "Object code" means any non-source
+form of a work.
+
+ A "Standard Interface" means an interface that either is an official
+standard defined by a recognized standards body, or, in the case of
+interfaces specified for a particular programming language, one that
+is widely used among developers working in that language.
+
+ The "System Libraries" of an executable work include anything, other
+than the work as a whole, that (a) is included in the normal form of
+packaging a Major Component, but which is not part of that Major
+Component, and (b) serves only to enable use of the work with that
+Major Component, or to implement a Standard Interface for which an
+implementation is available to the public in source code form. A
+"Major Component", in this context, means a major essential component
+(kernel, window system, and so on) of the specific operating system
+(if any) on which the executable work runs, or a compiler used to
+produce the work, or an object code interpreter used to run it.
+
+ The "Corresponding Source" for a work in object code form means all
+the source code needed to generate, install, and (for an executable
+work) run the object code and to modify the work, including scripts to
+control those activities. However, it does not include the work's
+System Libraries, or general-purpose tools or generally available free
+programs which are used unmodified in performing those activities but
+which are not part of the work. For example, Corresponding Source
+includes interface definition files associated with source files for
+the work, and the source code for shared libraries and dynamically
+linked subprograms that the work is specifically designed to require,
+such as by intimate data communication or control flow between those
+subprograms and other parts of the work.
+
+ The Corresponding Source need not include anything that users
+can regenerate automatically from other parts of the Corresponding
+Source.
+
+ The Corresponding Source for a work in source code form is that
+same work.
+
+ 2. Basic Permissions.
+
+ All rights granted under this License are granted for the term of
+copyright on the Program, and are irrevocable provided the stated
+conditions are met. This License explicitly affirms your unlimited
+permission to run the unmodified Program. The output from running a
+covered work is covered by this License only if the output, given its
+content, constitutes a covered work. This License acknowledges your
+rights of fair use or other equivalent, as provided by copyright law.
+
+ You may make, run and propagate covered works that you do not
+convey, without conditions so long as your license otherwise remains
+in force. You may convey covered works to others for the sole purpose
+of having them make modifications exclusively for you, or provide you
+with facilities for running those works, provided that you comply with
+the terms of this License in conveying all material for which you do
+not control copyright. Those thus making or running the covered works
+for you must do so exclusively on your behalf, under your direction
+and control, on terms that prohibit them from making any copies of
+your copyrighted material outside their relationship with you.
+
+ Conveying under any other circumstances is permitted solely under
+the conditions stated below. Sublicensing is not allowed; section 10
+makes it unnecessary.
+
+ 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
+
+ No covered work shall be deemed part of an effective technological
+measure under any applicable law fulfilling obligations under article
+11 of the WIPO copyright treaty adopted on 20 December 1996, or
+similar laws prohibiting or restricting circumvention of such
+measures.
+
+ When you convey a covered work, you waive any legal power to forbid
+circumvention of technological measures to the extent such circumvention
+is effected by exercising rights under this License with respect to
+the covered work, and you disclaim any intention to limit operation or
+modification of the work as a means of enforcing, against the work's
+users, your or third parties' legal rights to forbid circumvention of
+technological measures.
+
+ 4. Conveying Verbatim Copies.
+
+ You may convey verbatim copies of the Program's source code as you
+receive it, in any medium, provided that you conspicuously and
+appropriately publish on each copy an appropriate copyright notice;
+keep intact all notices stating that this License and any
+non-permissive terms added in accord with section 7 apply to the code;
+keep intact all notices of the absence of any warranty; and give all
+recipients a copy of this License along with the Program.
+
+ You may charge any price or no price for each copy that you convey,
+and you may offer support or warranty protection for a fee.
+
+ 5. Conveying Modified Source Versions.
+
+ You may convey a work based on the Program, or the modifications to
+produce it from the Program, in the form of source code under the
+terms of section 4, provided that you also meet all of these conditions:
+
+ a) The work must carry prominent notices stating that you modified
+ it, and giving a relevant date.
+
+ b) The work must carry prominent notices stating that it is
+ released under this License and any conditions added under section
+ 7. This requirement modifies the requirement in section 4 to
+ "keep intact all notices".
+
+ c) You must license the entire work, as a whole, under this
+ License to anyone who comes into possession of a copy. This
+ License will therefore apply, along with any applicable section 7
+ additional terms, to the whole of the work, and all its parts,
+ regardless of how they are packaged. This License gives no
+ permission to license the work in any other way, but it does not
+ invalidate such permission if you have separately received it.
+
+ d) If the work has interactive user interfaces, each must display
+ Appropriate Legal Notices; however, if the Program has interactive
+ interfaces that do not display Appropriate Legal Notices, your
+ work need not make them do so.
+
+ A compilation of a covered work with other separate and independent
+works, which are not by their nature extensions of the covered work,
+and which are not combined with it such as to form a larger program,
+in or on a volume of a storage or distribution medium, is called an
+"aggregate" if the compilation and its resulting copyright are not
+used to limit the access or legal rights of the compilation's users
+beyond what the individual works permit. Inclusion of a covered work
+in an aggregate does not cause this License to apply to the other
+parts of the aggregate.
+
+ 6. Conveying Non-Source Forms.
+
+ You may convey a covered work in object code form under the terms
+of sections 4 and 5, provided that you also convey the
+machine-readable Corresponding Source under the terms of this License,
+in one of these ways:
+
+ a) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by the
+ Corresponding Source fixed on a durable physical medium
+ customarily used for software interchange.
+
+ b) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by a
+ written offer, valid for at least three years and valid for as
+ long as you offer spare parts or customer support for that product
+ model, to give anyone who possesses the object code either (1) a
+ copy of the Corresponding Source for all the software in the
+ product that is covered by this License, on a durable physical
+ medium customarily used for software interchange, for a price no
+ more than your reasonable cost of physically performing this
+ conveying of source, or (2) access to copy the
+ Corresponding Source from a network server at no charge.
+
+ c) Convey individual copies of the object code with a copy of the
+ written offer to provide the Corresponding Source. This
+ alternative is allowed only occasionally and noncommercially, and
+ only if you received the object code with such an offer, in accord
+ with subsection 6b.
+
+ d) Convey the object code by offering access from a designated
+ place (gratis or for a charge), and offer equivalent access to the
+ Corresponding Source in the same way through the same place at no
+ further charge. You need not require recipients to copy the
+ Corresponding Source along with the object code. If the place to
+ copy the object code is a network server, the Corresponding Source
+ may be on a different server (operated by you or a third party)
+ that supports equivalent copying facilities, provided you maintain
+ clear directions next to the object code saying where to find the
+ Corresponding Source. Regardless of what server hosts the
+ Corresponding Source, you remain obligated to ensure that it is
+ available for as long as needed to satisfy these requirements.
+
+ e) Convey the object code using peer-to-peer transmission, provided
+ you inform other peers where the object code and Corresponding
+ Source of the work are being offered to the general public at no
+ charge under subsection 6d.
+
+ A separable portion of the object code, whose source code is excluded
+from the Corresponding Source as a System Library, need not be
+included in conveying the object code work.
+
+ A "User Product" is either (1) a "consumer product", which means any
+tangible personal property which is normally used for personal, family,
+or household purposes, or (2) anything designed or sold for incorporation
+into a dwelling. In determining whether a product is a consumer product,
+doubtful cases shall be resolved in favor of coverage. For a particular
+product received by a particular user, "normally used" refers to a
+typical or common use of that class of product, regardless of the status
+of the particular user or of the way in which the particular user
+actually uses, or expects or is expected to use, the product. A product
+is a consumer product regardless of whether the product has substantial
+commercial, industrial or non-consumer uses, unless such uses represent
+the only significant mode of use of the product.
+
+ "Installation Information" for a User Product means any methods,
+procedures, authorization keys, or other information required to install
+and execute modified versions of a covered work in that User Product from
+a modified version of its Corresponding Source. The information must
+suffice to ensure that the continued functioning of the modified object
+code is in no case prevented or interfered with solely because
+modification has been made.
+
+ If you convey an object code work under this section in, or with, or
+specifically for use in, a User Product, and the conveying occurs as
+part of a transaction in which the right of possession and use of the
+User Product is transferred to the recipient in perpetuity or for a
+fixed term (regardless of how the transaction is characterized), the
+Corresponding Source conveyed under this section must be accompanied
+by the Installation Information. But this requirement does not apply
+if neither you nor any third party retains the ability to install
+modified object code on the User Product (for example, the work has
+been installed in ROM).
+
+ The requirement to provide Installation Information does not include a
+requirement to continue to provide support service, warranty, or updates
+for a work that has been modified or installed by the recipient, or for
+the User Product in which it has been modified or installed. Access to a
+network may be denied when the modification itself materially and
+adversely affects the operation of the network or violates the rules and
+protocols for communication across the network.
+
+ Corresponding Source conveyed, and Installation Information provided,
+in accord with this section must be in a format that is publicly
+documented (and with an implementation available to the public in
+source code form), and must require no special password or key for
+unpacking, reading or copying.
+
+ 7. Additional Terms.
+
+ "Additional permissions" are terms that supplement the terms of this
+License by making exceptions from one or more of its conditions.
+Additional permissions that are applicable to the entire Program shall
+be treated as though they were included in this License, to the extent
+that they are valid under applicable law. If additional permissions
+apply only to part of the Program, that part may be used separately
+under those permissions, but the entire Program remains governed by
+this License without regard to the additional permissions.
+
+ When you convey a copy of a covered work, you may at your option
+remove any additional permissions from that copy, or from any part of
+it. (Additional permissions may be written to require their own
+removal in certain cases when you modify the work.) You may place
+additional permissions on material, added by you to a covered work,
+for which you have or can give appropriate copyright permission.
+
+ Notwithstanding any other provision of this License, for material you
+add to a covered work, you may (if authorized by the copyright holders of
+that material) supplement the terms of this License with terms:
+
+ a) Disclaiming warranty or limiting liability differently from the
+ terms of sections 15 and 16 of this License; or
+
+ b) Requiring preservation of specified reasonable legal notices or
+ author attributions in that material or in the Appropriate Legal
+ Notices displayed by works containing it; or
+
+ c) Prohibiting misrepresentation of the origin of that material, or
+ requiring that modified versions of such material be marked in
+ reasonable ways as different from the original version; or
+
+ d) Limiting the use for publicity purposes of names of licensors or
+ authors of the material; or
+
+ e) Declining to grant rights under trademark law for use of some
+ trade names, trademarks, or service marks; or
+
+ f) Requiring indemnification of licensors and authors of that
+ material by anyone who conveys the material (or modified versions of
+ it) with contractual assumptions of liability to the recipient, for
+ any liability that these contractual assumptions directly impose on
+ those licensors and authors.
+
+ All other non-permissive additional terms are considered "further
+restrictions" within the meaning of section 10. If the Program as you
+received it, or any part of it, contains a notice stating that it is
+governed by this License along with a term that is a further
+restriction, you may remove that term. If a license document contains
+a further restriction but permits relicensing or conveying under this
+License, you may add to a covered work material governed by the terms
+of that license document, provided that the further restriction does
+not survive such relicensing or conveying.
+
+ If you add terms to a covered work in accord with this section, you
+must place, in the relevant source files, a statement of the
+additional terms that apply to those files, or a notice indicating
+where to find the applicable terms.
+
+ Additional terms, permissive or non-permissive, may be stated in the
+form of a separately written license, or stated as exceptions;
+the above requirements apply either way.
+
+ 8. Termination.
+
+ You may not propagate or modify a covered work except as expressly
+provided under this License. Any attempt otherwise to propagate or
+modify it is void, and will automatically terminate your rights under
+this License (including any patent licenses granted under the third
+paragraph of section 11).
+
+ However, if you cease all violation of this License, then your
+license from a particular copyright holder is reinstated (a)
+provisionally, unless and until the copyright holder explicitly and
+finally terminates your license, and (b) permanently, if the copyright
+holder fails to notify you of the violation by some reasonable means
+prior to 60 days after the cessation.
+
+ Moreover, your license from a particular copyright holder is
+reinstated permanently if the copyright holder notifies you of the
+violation by some reasonable means, this is the first time you have
+received notice of violation of this License (for any work) from that
+copyright holder, and you cure the violation prior to 30 days after
+your receipt of the notice.
+
+ Termination of your rights under this section does not terminate the
+licenses of parties who have received copies or rights from you under
+this License. If your rights have been terminated and not permanently
+reinstated, you do not qualify to receive new licenses for the same
+material under section 10.
+
+ 9. Acceptance Not Required for Having Copies.
+
+ You are not required to accept this License in order to receive or
+run a copy of the Program. Ancillary propagation of a covered work
+occurring solely as a consequence of using peer-to-peer transmission
+to receive a copy likewise does not require acceptance. However,
+nothing other than this License grants you permission to propagate or
+modify any covered work. These actions infringe copyright if you do
+not accept this License. Therefore, by modifying or propagating a
+covered work, you indicate your acceptance of this License to do so.
+
+ 10. Automatic Licensing of Downstream Recipients.
+
+ Each time you convey a covered work, the recipient automatically
+receives a license from the original licensors, to run, modify and
+propagate that work, subject to this License. You are not responsible
+for enforcing compliance by third parties with this License.
+
+ An "entity transaction" is a transaction transferring control of an
+organization, or substantially all assets of one, or subdividing an
+organization, or merging organizations. If propagation of a covered
+work results from an entity transaction, each party to that
+transaction who receives a copy of the work also receives whatever
+licenses to the work the party's predecessor in interest had or could
+give under the previous paragraph, plus a right to possession of the
+Corresponding Source of the work from the predecessor in interest, if
+the predecessor has it or can get it with reasonable efforts.
+
+ You may not impose any further restrictions on the exercise of the
+rights granted or affirmed under this License. For example, you may
+not impose a license fee, royalty, or other charge for exercise of
+rights granted under this License, and you may not initiate litigation
+(including a cross-claim or counterclaim in a lawsuit) alleging that
+any patent claim is infringed by making, using, selling, offering for
+sale, or importing the Program or any portion of it.
+
+ 11. Patents.
+
+ A "contributor" is a copyright holder who authorizes use under this
+License of the Program or a work on which the Program is based. The
+work thus licensed is called the contributor's "contributor version".
+
+ A contributor's "essential patent claims" are all patent claims
+owned or controlled by the contributor, whether already acquired or
+hereafter acquired, that would be infringed by some manner, permitted
+by this License, of making, using, or selling its contributor version,
+but do not include claims that would be infringed only as a
+consequence of further modification of the contributor version. For
+purposes of this definition, "control" includes the right to grant
+patent sublicenses in a manner consistent with the requirements of
+this License.
+
+ Each contributor grants you a non-exclusive, worldwide, royalty-free
+patent license under the contributor's essential patent claims, to
+make, use, sell, offer for sale, import and otherwise run, modify and
+propagate the contents of its contributor version.
+
+ In the following three paragraphs, a "patent license" is any express
+agreement or commitment, however denominated, not to enforce a patent
+(such as an express permission to practice a patent or covenant not to
+sue for patent infringement). To "grant" such a patent license to a
+party means to make such an agreement or commitment not to enforce a
+patent against the party.
+
+ If you convey a covered work, knowingly relying on a patent license,
+and the Corresponding Source of the work is not available for anyone
+to copy, free of charge and under the terms of this License, through a
+publicly available network server or other readily accessible means,
+then you must either (1) cause the Corresponding Source to be so
+available, or (2) arrange to deprive yourself of the benefit of the
+patent license for this particular work, or (3) arrange, in a manner
+consistent with the requirements of this License, to extend the patent
+license to downstream recipients. "Knowingly relying" means you have
+actual knowledge that, but for the patent license, your conveying the
+covered work in a country, or your recipient's use of the covered work
+in a country, would infringe one or more identifiable patents in that
+country that you have reason to believe are valid.
+
+ If, pursuant to or in connection with a single transaction or
+arrangement, you convey, or propagate by procuring conveyance of, a
+covered work, and grant a patent license to some of the parties
+receiving the covered work authorizing them to use, propagate, modify
+or convey a specific copy of the covered work, then the patent license
+you grant is automatically extended to all recipients of the covered
+work and works based on it.
+
+ A patent license is "discriminatory" if it does not include within
+the scope of its coverage, prohibits the exercise of, or is
+conditioned on the non-exercise of one or more of the rights that are
+specifically granted under this License. You may not convey a covered
+work if you are a party to an arrangement with a third party that is
+in the business of distributing software, under which you make payment
+to the third party based on the extent of your activity of conveying
+the work, and under which the third party grants, to any of the
+parties who would receive the covered work from you, a discriminatory
+patent license (a) in connection with copies of the covered work
+conveyed by you (or copies made from those copies), or (b) primarily
+for and in connection with specific products or compilations that
+contain the covered work, unless you entered into that arrangement,
+or that patent license was granted, prior to 28 March 2007.
+
+ Nothing in this License shall be construed as excluding or limiting
+any implied license or other defenses to infringement that may
+otherwise be available to you under applicable patent law.
+
+ 12. No Surrender of Others' Freedom.
+
+ If conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot convey a
+covered work so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you may
+not convey it at all. For example, if you agree to terms that obligate you
+to collect a royalty for further conveying from those to whom you convey
+the Program, the only way you could satisfy both those terms and this
+License would be to refrain entirely from conveying the Program.
+
+ 13. Remote Network Interaction; Use with the GNU General Public License.
+
+ Notwithstanding any other provision of this License, if you modify the
+Program, your modified version must prominently offer all users
+interacting with it remotely through a computer network (if your version
+supports such interaction) an opportunity to receive the Corresponding
+Source of your version by providing access to the Corresponding Source
+from a network server at no charge, through some standard or customary
+means of facilitating copying of software. This Corresponding Source
+shall include the Corresponding Source for any work covered by version 3
+of the GNU General Public License that is incorporated pursuant to the
+following paragraph.
+
+ Notwithstanding any other provision of this License, you have
+permission to link or combine any covered work with a work licensed
+under version 3 of the GNU General Public License into a single
+combined work, and to convey the resulting work. The terms of this
+License will continue to apply to the part which is the covered work,
+but the work with which it is combined will remain governed by version
+3 of the GNU General Public License.
+
+ 14. Revised Versions of this License.
+
+ The Free Software Foundation may publish revised and/or new versions of
+the GNU Affero General Public License from time to time. Such new versions
+will be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+ Each version is given a distinguishing version number. If the
+Program specifies that a certain numbered version of the GNU Affero General
+Public License "or any later version" applies to it, you have the
+option of following the terms and conditions either of that numbered
+version or of any later version published by the Free Software
+Foundation. If the Program does not specify a version number of the
+GNU Affero General Public License, you may choose any version ever published
+by the Free Software Foundation.
+
+ If the Program specifies that a proxy can decide which future
+versions of the GNU Affero General Public License can be used, that proxy's
+public statement of acceptance of a version permanently authorizes you
+to choose that version for the Program.
+
+ Later license versions may give you additional or different
+permissions. However, no additional obligations are imposed on any
+author or copyright holder as a result of your choosing to follow a
+later version.
+
+ 15. Disclaimer of Warranty.
+
+ THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
+OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+ 16. Limitation of Liability.
+
+ IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+ 17. Interpretation of Sections 15 and 16.
+
+ If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+
+    <one line to give the program's name and a brief idea of what it does.>
+    Copyright (C) <year>  <name of author>
+
+    This program is free software: you can redistribute it and/or modify
+    it under the terms of the GNU Affero General Public License as published by
+    the Free Software Foundation, either version 3 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU Affero General Public License for more details.
+
+    You should have received a copy of the GNU Affero General Public License
+    along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+ If your software can interact with users remotely through a computer
+network, you should also make sure that it provides a way for users to
+get its source. For example, if your program is a web application, its
+interface could display a "Source" link that leads users to an archive
+of the code. There are many ways you could offer source, and different
+solutions will be better for different programs; see section 13 for the
+specific requirements.
+
+ You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU AGPL, see
+<https://www.gnu.org/licenses/>.
diff --git a/MANIFEST.in b/MANIFEST.in
new file mode 100644
index 000000000..fa15133f4
--- /dev/null
+++ b/MANIFEST.in
@@ -0,0 +1,13 @@
+include karapace/*.py
+include karapace.json
+include karapace.unit
+include tests/*.py
+include scripts/*
+include README.rst
+include version.py
+include setup.py
+include setup.cfg
+include LICENSE
+include MANIFEST.in
+
+recursive-exclude examples *~ *.pyc \.*
diff --git a/Makefile b/Makefile
new file mode 100644
index 000000000..a6a45bcc5
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,74 @@
+SHORT_VER = 0.1.0
+LONG_VER = $(shell git describe --long 2>/dev/null || echo $(SHORT_VER)-0-unknown-g`git describe --always`)
+KAFKA_PATH = kafka_2.12-2.1.0
+KAFKA_TAR = $(KAFKA_PATH).tgz
+PYTHON_SOURCE_DIRS = karapace/
+PYTHON_TEST_DIRS = tests/
+ALL_PYTHON_DIRS = $(PYTHON_SOURCE_DIRS) $(PYTHON_TEST_DIRS)
+GENERATED = karapace/version.py
+PYTHON = python3
+
+default: $(GENERATED)
+
+clean:
+	rm -rf rpm/
+
+.PHONY: build-dep-fedora
+build-dep-fedora:
+	sudo dnf builddep karapace.spec
+
+karapace/version.py: version.py
+	$(PYTHON) $^ $@
+
+$(KAFKA_TAR):
+	wget "http://www.nic.funet.fi/pub/mirrors/apache.org/kafka/2.1.0/$(KAFKA_PATH).tgz"
+
+$(KAFKA_PATH): $(KAFKA_TAR)
+	tar zxf "$(KAFKA_TAR)"
+
+.PHONY: kafka
+kafka: $(KAFKA_PATH)
+	"$(KAFKA_PATH)/bin/zookeeper-server-start.sh" "$(KAFKA_PATH)/config/zookeeper.properties" &
+	"$(KAFKA_PATH)/bin/kafka-server-start.sh" "$(KAFKA_PATH)/config/server.properties" &
+
+.PHONY: pylint
+pylint:
+	python3 -m pylint --rcfile .pylintrc $(ALL_PYTHON_DIRS)
+
+.PHONY: flake8
+flake8:
+	python3 -m flake8 --config .flake8 $(ALL_PYTHON_DIRS)
+
+.PHONY: copyright
+copyright:
+	grep -EL "Copyright \(c\) 20.* Aiven" $(shell git ls-files "*.py" | grep -v __init__.py)
+
+.PHONY: unittest
+unittest:
+	python3 -m pytest -s -vvv tests/
+
+.PHONY: test
+test: flake8 pylint copyright unittest
+
+.PHONY: isort
+isort:
+	time isort --recursive $(ALL_PYTHON_DIRS)
+
+.PHONY: yapf
+yapf:
+	time yapf --parallel --recursive --in-place $(ALL_PYTHON_DIRS)
+
+.PHONY: reformat
+reformat: isort yapf
+
+.PHONY: rpm
+rpm: $(GENERATED)
+	git archive --output=karapace-rpm-src.tar --prefix=karapace/ HEAD
+	# add generated files to the tar, they're not in the git repository
+	tar -r -f karapace-rpm-src.tar --transform=s,karapace/,karapace/karapace/, $(GENERATED)
+	rpmbuild -bb karapace.spec \
+		--define '_topdir $(PWD)/rpm' \
+		--define '_sourcedir $(CURDIR)' \
+		--define 'major_version $(SHORT_VER)' \
+		--define 'minor_version $(subst -,.,$(subst $(SHORT_VER)-,,$(LONG_VER)))'
+	$(RM) karapace-rpm-src.tar
diff --git a/NEWS b/NEWS
new file mode 100644
index 000000000..5c8766f6c
--- /dev/null
+++ b/NEWS
@@ -0,0 +1,6 @@
+Karapace 0.1.0 (2019-01-16)
+===========================
+
+* Initial release with schema storing, retrieving and compatibility testing functionality
+* Unit tests for the functionality
+* aiohttp-based architecture
diff --git a/README.rst b/README.rst
new file mode 100644
index 000000000..6afe76397
--- /dev/null
+++ b/README.rst
@@ -0,0 +1,334 @@
+Karapace |BuildStatus|_
+=======================
+
+.. |BuildStatus| image:: https://travis-ci.org/aiven/karapace.png?branch=master
+.. _BuildStatus: https://travis-ci.org/aiven/karapace
+
+``karapace``: your Kafka essentials in one tool
+
+
+Features
+========
+
+* Schema Registry that is 1:1 compatible with the pre-existing proprietary
+  Confluent Schema Registry
+* Drop-in replacement for both pre-existing Schema Registry clients and
+  servers
+* Moderate memory consumption
+* Asynchronous architecture based on aiohttp
+
+
+Overview
+========
+
+Karapace stores schemas in a central repository, which clients can access
+to serialize and deserialize messages. The schemas maintain their own
+version histories and can be checked for compatibility between versions.
+
+
+Requirements
+============
+
+Karapace requires Python 3.6 or later and some additional components in
+order to operate:
+
+* aiohttp_ for serving schemas over HTTP in an asynchronous fashion
+* avro-python3_ for Avro serialization
+* kafka-python_ to read, write and coordinate Karapace's persistence in Kafka
+
+.. _`aiohttp`: https://github.com/aio-libs/aiohttp
+.. _`avro-python3`: https://github.com/apache/avro
+.. _`kafka-python`: https://github.com/dpkp/kafka-python
+
+Developing and testing Karapace also requires the following utilities:
+requests_, flake8_, pylint_ and pytest_.
+
+.. _`flake8`: https://flake8.readthedocs.io/
+.. _`requests`: http://www.python-requests.org/en/latest/
+.. _`pylint`: https://www.pylint.org/
+.. _`pytest`: http://pytest.org/
+
+Karapace has been developed and tested on modern Linux x86-64 systems, but
+should work on other platforms that provide the required modules.
+
+
+Building
+========
+
+To build an installation package for your distribution, go to the root
+directory of a Karapace Git checkout and run:
+
+Fedora::
+
+ make rpm
+
+This will produce a ``.rpm`` package, usually placed in ``rpm/RPMS/noarch/``.
+
+Python/Other::
+
+ python3 setup.py bdist_egg
+
+This will produce an egg file in a ``dist`` directory within the project root.
+
+
+Installation
+============
+
+To install it, run as root:
+
+Fedora::
+
+ dnf install rpm/RPMS/noarch/*
+
+On Linux systems it is recommended to simply run ``karapace`` under
+``systemd``::
+
+ systemctl enable karapace.service
+
+and, once the Setup section below has been completed, you can start it with::
+
+ systemctl start karapace.service
+
+Python/Other::
+
+ easy_install dist/karapace-0.1.0-py3.6.egg
+
+
+Setup
+=====
+
+After this you need to create a suitable JSON configuration file for your
+installation. The keys that need special care are the ones that configure
+Kafka and ``advertised_hostname``.
+
+For descriptions of the configuration keys, see the `Configuration keys`_
+section below. Here's an example configuration file to give you an idea
+what you need to change::
+
+  {
+      "advertised_hostname": "localhost",
+      "bootstrap_uri": "127.0.0.1:9092",
+      "client_id": "sr-1",
+      "compatibility": "FULL",
+      "group_id": "schema-registry",
+      "host": "127.0.0.1",
+      "log_level": "DEBUG",
+      "port": 8081,
+      "master_eligibility": true,
+      "replication_factor": 1,
+      "security_protocol": "PLAINTEXT",
+      "ssl_cafile": null,
+      "ssl_certfile": null,
+      "ssl_keyfile": null,
+      "topic_name": "_schemas"
+  }
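+
+A file like the above can also be generated with a few lines of Python. This
+is only a sketch; adjust the values for your environment. Any keys you leave
+out fall back to Karapace's built-in defaults::
+
+  import json
+
+  config = {
+      "advertised_hostname": "localhost",
+      "bootstrap_uri": "127.0.0.1:9092",
+      "group_id": "schema-registry",
+  }
+  with open("karapace.config.json", "w") as fp:
+      json.dump(config, fp, indent=4, sort_keys=True)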
+
+
+Quickstart
+==========
+
+To register the first version of a schema under the subject "test-key"::
+
+ $ curl -X POST -H "Content-Type: application/vnd.schemaregistry.v1+json" \
+ --data '{"schema": "{\"type\": \"record\", \"name\": \"Obj\", \"fields\":[{\"name\": \"age\", \"type\": \"int\"}]}"}' \
+ http://localhost:8081/subjects/test-key/versions
+ {"id":1}
+
+
+To list all subjects (including the one created just above)::
+
+ $ curl -X GET http://localhost:8081/subjects
+ ["test-key"]
+
+To list all the versions registered under a given subject (including the one just created above)::
+
+ $ curl -X GET http://localhost:8081/subjects/test-key/versions
+ [1]
+
+To fetch back the schema whose global id is 1 (i.e. the one registered above)::
+
+ $ curl -X GET http://localhost:8081/schemas/ids/1
+ {"schema":"{\"fields\":[{\"name\":\"age\",\"type\":\"int\"}],\"name\":\"Obj\",\"type\":\"record\"}"}
+
+To get the specific version 1 of the schema just registered run::
+
+ $ curl -X GET http://localhost:8081/subjects/test-key/versions/1
+ {"subject":"test-key","version":1,"id":1,"schema":"{\"fields\":[{\"name\":\"age\",\"type\":\"int\"}],\"name\":\"Obj\",\"type\":\"record\"}"}
+
+To get the latest version of the schema under subject test-key run::
+
+ $ curl -X GET http://localhost:8081/subjects/test-key/versions/latest
+ {"subject":"test-key","version":1,"id":1,"schema":"{\"fields\":[{\"name\":\"age\",\"type\":\"int\"}],\"name\":\"Obj\",\"type\":\"record\"}"}
+
+In order to delete version 10 of the schema registered under subject "test-key" (if it exists)::
+
+ $ curl -X DELETE http://localhost:8081/subjects/test-key/versions/10
+ 10
+
+To delete all versions of the schema registered under subject "test-key"::
+
+ $ curl -X DELETE http://localhost:8081/subjects/test-key
+ [1]
+
+Test the compatibility of a schema with the latest schema under subject "test-key"::
+
+ $ curl -X POST -H "Content-Type: application/vnd.schemaregistry.v1+json" \
+ --data '{"schema": "{\"type\": \"int\"}"}' \
+ http://localhost:8081/compatibility/subjects/test-key/versions/latest
+ {"is_compatible":true}
+
+Get the current global compatibility setting::
+
+ $ curl -X GET http://localhost:8081/config
+ {"compatibilityLevel":"BACKWARD"}
+
+Change compatibility requirements for all subjects where it's not
+specifically defined otherwise::
+
+ $ curl -X PUT -H "Content-Type: application/vnd.schemaregistry.v1+json" \
+ --data '{"compatibility": "NONE"}' http://localhost:8081/config
+ {"compatibility":"NONE"}
+
+Change compatibility requirement to FULL for the test-key subject::
+
+ $ curl -X PUT -H "Content-Type: application/vnd.schemaregistry.v1+json" \
+ --data '{"compatibility": "FULL"}' http://localhost:8081/config/test-key
+ {"compatibility":"FULL"}
+
+
+Backing up your Karapace
+========================
+
+Karapace natively stores its data in a Kafka topic; the topic's name is
+freely configurable and defaults to ``_schemas``.
+
+To easily back up the data in the topic you can run Kafka's Java console
+consumer::
+
+ ./kafka-console-consumer.sh --bootstrap-server brokerhostname:9092 --topic _schemas --from-beginning --property print.key=true --timeout-ms 1000 1> schemas.log
+
+
+Restoring Karapace from backup
+==============================
+
+Kafka's Java console producer can then in turn be used to restore the data
+to a new Kafka cluster.
+
+You can restore the data from the previous step by running::
+
+ ./kafka-console-producer.sh --broker-list brokerhostname:9092 --topic _schemas --property parse.key=true < schemas.log
+
+
+Commands
+========
+
+Once installed, the ``karapace`` program should be in your path. It is the
+main daemon process that should be run under a service manager such as
+``systemd`` to serve clients.
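+
+For a quick manual test you can also start it directly, passing it the path
+to your configuration file (example path below)::
+
+  $ karapace karapace.config.json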
+
+
+Configuration keys
+==================
+
+``advertised_hostname`` (default ``socket.gethostname()``)
+
+The hostname being advertised to other instances of Karapace that are
+attached to the same Kafka group. All nodes within the cluster need to have
+their ``advertised_hostname`` set so that they can all reach each other.
+
+``bootstrap_uri`` (default ``localhost:9092``)
+
+The URI of the Kafka service used to store the schemas and to coordinate
+among the Karapace instances.
+
+``client_id`` (default ``sr-1``)
+
+The client_id that Karapace uses when coordinating with other Karapace
+instances to decide which of them is the master. The instance whose name
+sorts first alphabetically is chosen as master from among the services
+with master_eligibility set to true.
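+
+A minimal sketch of that selection rule (illustrative only, not the actual
+coordination code)::
+
+  eligible = ["sr-3", "sr-1", "sr-2"]  # client_ids with master_eligibility set to true
+  master = min(eligible)  # "sr-1" sorts first alphabetically, so it is chosen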
+
+``group_id`` (default ``schema-registry``)
+
+The Kafka group name used for selecting a master service to coordinate the
+storing of schemas.
+
+``master_eligibility`` (default ``true``)
+
+Whether the service instance should be considered for promotion to master.
+A reason to turn this off would be to have an instance of Karapace running
+somewhere else for HA purposes which you wouldn't want to automatically
+promote to master if the primary instances were to become unavailable.
+
+``security_protocol`` (default ``PLAINTEXT``)
+
+Default Kafka security protocol needed to communicate with the Kafka
+cluster. The other option is ``SSL``, for SSL client certificate
+authentication.
+
+``ssl_cafile`` (default ``null``)
+
+Used when security_protocol is set to SSL, the path to the SSL CA certificate.
+
+``ssl_certfile`` (default ``null``)
+
+Used when security_protocol is set to SSL, the path to the SSL certfile.
+
+``ssl_keyfile`` (default ``null``)
+
+Used when security_protocol is set to SSL, the path to the SSL keyfile.
+
+``topic_name`` (default ``_schemas``)
+
+The name of the Kafka topic in which to store the schemas.
+
+``replication_factor`` (default ``1``)
+
+The replication factor to be used with the schema topic.
+
+``host`` (default ``"127.0.0.1"``)
+
+Address to bind the Karapace HTTP server to. Set to an empty string to
+listen on all available addresses.
+
+``port`` (default ``8081``)
+
+HTTP webserver port to bind the Karapace to.
+
+
+License
+=======
+
+Karapace is licensed under the AGPL, version 3. Full license text is
+available in the ``LICENSE`` file.
+
+Please note that the project explicitly does not require a CLA (Contributor
+License Agreement) from its contributors.
+
+
+Contact
+=======
+
+Bug reports and patches are very welcome, please post them as GitHub issues
+and pull requests at https://github.com/aiven/karapace . Any possible
+vulnerabilities or other serious issues should be reported directly to the
+maintainers <opensource@aiven.io>.
+
+
+Credits
+=======
+
+Karapace was created by, and is maintained by, Aiven_ cloud data hub
+developers.
+
+The schema storing part of Karapace borrows heavily from the ideas of the
+earlier Schema Registry implementation by Confluent, and thanks are in
+order to them for pioneering the concept.
+
+.. _`Aiven`: https://aiven.io/
+
+Recent contributors are listed on the GitHub project page,
+https://github.com/aiven/karapace/graphs/contributors
+
+Copyright ⓒ 2019 Aiven Ltd.
diff --git a/karapace.config.json b/karapace.config.json
new file mode 100644
index 000000000..46556b664
--- /dev/null
+++ b/karapace.config.json
@@ -0,0 +1,17 @@
+{
+    "advertised_hostname": "localhost",
+    "bootstrap_uri": "127.0.0.1:9092",
+    "client_id": "sr-1",
+    "compatibility": "FULL",
+    "group_id": "schema-registry",
+    "host": "127.0.0.1",
+    "log_level": "DEBUG",
+    "port": 8081,
+    "master_eligibility": true,
+    "replication_factor": 1,
+    "security_protocol": "PLAINTEXT",
+    "ssl_cafile": null,
+    "ssl_certfile": null,
+    "ssl_keyfile": null,
+    "topic_name": "_schemas"
+}
diff --git a/karapace.spec b/karapace.spec
new file mode 100644
index 000000000..b325e9b92
--- /dev/null
+++ b/karapace.spec
@@ -0,0 +1,57 @@
+Name: karapace
+Version: %{major_version}
+Release: %{minor_version}%{?dist}
+Url: http://github.com/aiven/karapace
+Summary: Your Kafka essentials in one tool
+License: AGPL 3
+Source0: karapace-rpm-src.tar
+BuildRequires: python3-aiohttp
+BuildRequires: python3-aiosocksy
+BuildRequires: python3-avro
+BuildRequires: python3-devel
+BuildRequires: python3-flake8
+BuildRequires: python3-isort
+BuildRequires: python3-kafka
+BuildRequires: python3-pylint
+BuildRequires: python3-pytest
+BuildRequires: python3-requests
+BuildRequires: python3-yapf
+Requires: python3-aiohttp
+Requires: python3-aiosocksy
+Requires: python3-avro
+Requires: python3-kafka
+Requires: python3-requests
+Requires: systemd
+
+%undefine _missing_build_ids_terminate_build
+
+%description
+Your Kafka essentials in one tool
+
+
+%prep
+%setup -q -n karapace
+
+
+%install
+python3 setup.py install --prefix=%{_prefix} --root=%{buildroot}
+sed -e "s@#!/bin/python@#!%{_bindir}/python@" -i %{buildroot}%{_bindir}/*
+rm -rf %{buildroot}%{python3_sitelib}/tests/
+%{__install} -Dm0644 karapace.unit %{buildroot}%{_unitdir}/karapace.service
+%{__mkdir_p} %{buildroot}%{_localstatedir}/lib/karapace
+
+
+%check
+make -j flake8 pylint
+
+%files
+%defattr(-,root,root,-)
+%doc LICENSE README.rst karapace.config.json
+%{_bindir}/karapace*
+%{_unitdir}/karapace.service
+%{python3_sitelib}/*
+
+
+%changelog
+* Mon Jan 14 2019 Hannu Valtonen <hannu.valtonen@aiven.io> - 0.0.1
+- Initial RPM package
diff --git a/karapace.unit b/karapace.unit
new file mode 100644
index 000000000..1d05210a4
--- /dev/null
+++ b/karapace.unit
@@ -0,0 +1,14 @@
+[Unit]
+Description=Your Kafka essentials in one tool
+
+[Service]
+User=karapace
+Group=karapace
+Type=notify
+Restart=always
+ExecStart=/usr/bin/karapace /etc/karapace/karapace.config.json
+ExecReload=/bin/kill -HUP $MAINPID
+WorkingDirectory=/var/lib/karapace
+
+[Install]
+WantedBy=multi-user.target
diff --git a/karapace/__init__.py b/karapace/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/karapace/compatibility.py b/karapace/compatibility.py
new file mode 100644
index 000000000..99179079f
--- /dev/null
+++ b/karapace/compatibility.py
@@ -0,0 +1,120 @@
+"""
+karapace - schema compatibility checking
+
+Copyright (c) 2019 Aiven Ltd
+See LICENSE for details
+"""
+import avro.schema
+import logging
+
+
+class IncompatibleSchema(Exception):
+ pass
+
+
+class Compatibility:
+ def __init__(self, source, target, compatibility):
+ self.source = source
+ self.target = target
+ self.log = logging.getLogger("Compatibility")
+ self.compatibility = compatibility
+ self.log.info("Compatibility initialized with level: %r", self.compatibility)
+
+ def check(self):
+ if self.compatibility == "NONE":
+ self.log.info("Compatibility level set to NONE, no schema compatibility checks performed")
+ return True
+ return self.check_compatibility(self.source, self.target)
+
+ def contains(self, field, target): # pylint: disable=no-self-use
+ return bool(field in target.fields)
+
+ def check_same_name(self, source, target): # pylint: disable=no-self-use
+ return source.name == target.name
+
+ def check_same_type(self, source, target): # pylint: disable=no-self-use, too-many-return-statements
+ """Returns info on if the types are the same and whether it's a basetype or not"""
+ self.log.error("source: %s, target: %s", type(source.type), type(target.type))
+ self.log.error("source: %s, target: %s", source, target)
+
+ if isinstance(source.type, str):
+ return source.type == target.type, True
+ if isinstance(source.type, avro.schema.PrimitiveSchema):
+ if isinstance(target.type, avro.schema.PrimitiveSchema):
+ # Follow promotion rules in schema resolution section of:
+ # https://avro.apache.org/docs/current/spec.html#schemas
+ if source.type.type == "int" and target.type.type in {"int", "long", "float", "double"}:
+ return True, True
+ if source.type.type == "long" and target.type.type in {"long", "float", "double"}:
+ return True, True
+ if source.type.type == "float" and target.type.type in {"float", "double"}:
+ return True, True
+ if source.type.type == "string" and target.type.type in {"string", "bytes"}:
+ return True, True
+ return source.type.type == target.type.type, True
+ return False, True
+ if isinstance(source.type, avro.schema.RecordSchema):
+ return isinstance(target.type, avro.schema.RecordSchema), False
+ if isinstance(source.type, avro.schema.EnumSchema):
+ return isinstance(target.type, avro.schema.EnumSchema), True
+ raise IncompatibleSchema("Unhandled schema type: {}".format(type(source.type)))
+
+ def check_source_field(self, source, target):
+ for source_field in source.fields:
+ if self.contains(source_field, target): # this is an optimization to check for identical fields
+ self.log.info("source_field: identical %s in both source and target: %s", source_field.name, target)
+ continue
+ # The fields aren't identical in both but could be similar enough (i.e. changed default)
+ found = False
+ for target_field in target.fields:
+ if self.check_same_name(source_field, target_field):
+ # Ok we found the same named fields
+ same_type, base_type = self.check_same_type(source_field, target_field)
+ if not same_type: # different types
+ raise IncompatibleSchema(
+ "source_field.type: {} != target_field.type: {}".format(source_field.type, target_field.type)
+ )
+ if not base_type: # same type but nested structure
+ self.log.info("Recursing source with: source: %s target: %s", source_field, target_field)
+ self.check_compatibility(source_field.type, target_field.type)
+ else:
+ found = True
+ break
+ if not found:
+ self.log.info("source_field: %s removed from: %s", source_field.name, target)
+ if not found and self.compatibility in {"FORWARD", "FULL"} and not source_field.has_default:
+ raise IncompatibleSchema("Source field: {} removed".format(source_field.name))
+
+ def check_target_field(self, source, target):
+ for target_field in target.fields:
+ if self.contains(target_field, source):
+ self.log.info("target_field: %r in both source and target: %r", target_field.name, source)
+ continue
+ # The fields aren't identical in both but could be similar enough (i.e. changed default)
+ found = False
+ for source_field in source.fields:
+ if self.check_same_name(source_field, target_field):
+ same_type, base_type = self.check_same_type(source_field, target_field)
+ if not same_type:
+ raise IncompatibleSchema(
+ "source_field.type: {} != target_field.type: {}".format(source_field.type, target_field.type)
+ )
+ if not base_type:
+ self.log.info("Recursing target with: source: %s target: %s", source_field, target_field)
+ self.check_compatibility(source_field.type, target_field.type)
+ else:
+ found = True
+ self.log.info("source_field is: %s, target_field: %s added", source_field, target_field)
+ break
+
+ if not found and self.compatibility in {"BACKWARD", "FULL"} and not target_field.has_default:
+ raise IncompatibleSchema("Target field: {} added".format(target_field.name))
+
+ def check_compatibility(self, source, target):
+ same_type, _ = self.check_same_type(source, target)
+ if not same_type:
+ raise IncompatibleSchema("source {} and target {} different types".format(source, target))
+
+ if source.type == "record":
+ self.check_source_field(source, target)
+ self.check_target_field(source, target)
diff --git a/karapace/config.py b/karapace/config.py
new file mode 100644
index 000000000..f62f2026d
--- /dev/null
+++ b/karapace/config.py
@@ -0,0 +1,26 @@
+"""
+karapace - configuration validation
+
+Copyright (c) 2019 Aiven Ltd
+See LICENSE for details
+"""
+import socket
+
+
+def set_config_defaults(config):
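+    """Fill in a default value for every configuration key the caller left unset.
+
+    A minimal sketch of the intended use (hypothetical bootstrap value):
+        config = set_config_defaults({"bootstrap_uri": "kafka.example.com:9092"})
+        assert config["port"] == 8081  # defaulted
+    """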
+ config.setdefault("advertised_hostname", socket.gethostname())
+ config.setdefault("bootstrap_uri", "127.0.0.1:9092")
+ config.setdefault("client_id", "sr-1")
+ config.setdefault("compatibility", "BACKWARD")
+ config.setdefault("group_id", "schema-registry")
+ config.setdefault("host", "127.0.0.1")
+ config.setdefault("log_level", "DEBUG")
+ config.setdefault("port", 8081)
+ config.setdefault("master_eligibility", True)
+ config.setdefault("replication_factor", 1)
+ config.setdefault("security_protocol", "PLAINTEXT")
+ config.setdefault("ssl_cafile", None)
+ config.setdefault("ssl_certfile", None)
+ config.setdefault("ssl_keyfile", None)
+ config.setdefault("topic_name", "_schemas")
+ return config
diff --git a/karapace/karapace.py b/karapace/karapace.py
new file mode 100644
index 000000000..53b9de6e7
--- /dev/null
+++ b/karapace/karapace.py
@@ -0,0 +1,421 @@
+"""
+karapace - main
+
+Copyright (c) 2019 Aiven Ltd
+See LICENSE for details
+"""
+import asyncio
+import avro.schema
+import json
+import logging
+import os
+import sys
+import time
+
+from kafka import KafkaProducer
+from karapace.compatibility import Compatibility, IncompatibleSchema
+from karapace.config import set_config_defaults
+from karapace.master_coordinator import MasterCoordinator
+from karapace.rapu import HTTPResponse, RestApp
+from karapace.schema_reader import KafkaSchemaReader
+from karapace.utils import json_encode
+
+LOG_FORMAT_JOURNAL = "%(name)-20s\t%(threadName)s\t%(levelname)-8s\t%(message)s"
+logging.basicConfig(level=logging.INFO, format=LOG_FORMAT_JOURNAL)
+
+COMPATIBILITY_MODES = {"NONE", "FULL", "FORWARD", "BACKWARD"}
+
+
+class InvalidConfiguration(Exception):
+ pass
+
+
+class Karapace(RestApp):
+ def __init__(self, config_path):
+ super().__init__(app_name="Karapace")
+ self.config = {}
+ self.config_path = config_path
+ self.log = logging.getLogger("Karapace")
+ self.kafka_timeout = 10
+
+ self.route(
+ "/compatibility/subjects//versions/",
+ callback=self.compatibility_check,
+ method="POST"
+ )
+
+ self.route("/config/", callback=self.config_subject_get, method="GET")
+ self.route("/config/", callback=self.config_subject_set, method="PUT")
+ self.route("/config", callback=self.config_get, method="GET")
+ self.route("/config", callback=self.config_set, method="PUT")
+
+ self.route("/schemas/ids/", callback=self.schemas_get, method="GET")
+
+ self.route("/subjects", callback=self.subjects_list, method="GET")
+ self.route("/subjects//versions", callback=self.subject_post, method="POST")
+ self.route("/subjects//versions", callback=self.subject_versions_list, method="GET")
+ self.route("/subjects//versions/", callback=self.subject_version_get, method="GET")
+ self.route("/subjects//versions/", callback=self.subject_version_delete, method="DELETE")
+ self.route("/subjects/", callback=self.subject_delete, method="DELETE")
+
+ self.ksr = None
+ self.read_config()
+ self._create_producer()
+ self._create_schema_reader()
+ self._create_master_coordinator()
+
+ self.master_lock = asyncio.Lock()
+
+ self.log.info("Karapace initialized")
+
+ def close(self):
+ self.log.info("Shutting down all auxiliary threads")
+ if self.mc:
+ self.mc.close()
+ if self.ksr:
+ self.ksr.close()
+ if self.producer:
+ self.producer.close()
+
+ def read_config(self):
+ if os.path.exists(self.config_path):
+ try:
+                with open(self.config_path, "r") as fp:
+                    config = json.loads(fp.read())
+                self.config = set_config_defaults(config)
+ try:
+ logging.getLogger().setLevel(config["log_level"])
+ except ValueError:
+                    self.log.exception("Problem with log_level: %r", config["log_level"])
+ except Exception as ex:
+ raise InvalidConfiguration(ex)
+ else:
+ raise InvalidConfiguration()
+
+ def _create_schema_reader(self):
+        self.ksr = KafkaSchemaReader(config=self.config)
+ self.ksr.start()
+
+ def _create_master_coordinator(self):
+ self.mc = MasterCoordinator(config=self.config)
+ self.mc.start()
+
+ def _create_producer(self):
+ self.producer = KafkaProducer(
+ bootstrap_servers=self.config["bootstrap_uri"],
+ security_protocol=self.config["security_protocol"],
+ ssl_cafile=self.config["ssl_cafile"],
+ ssl_certfile=self.config["ssl_certfile"],
+ ssl_keyfile=self.config["ssl_keyfile"],
+ api_version=(1, 0, 0),
+ )
+
+ @staticmethod
+ def r(body, status=200):
+ raise HTTPResponse(
+ body=body,
+ status=status,
+ content_type="application/vnd.schemaregistry.v1+json",
+ headers={},
+ )
+
+ def get_offset_from_queue(self, sent_offset):
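+        # Block until the schema reader thread has consumed our own produced message
+        # back from Kafka, which guarantees the local state reflects the write we just made.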
+ start_time = time.monotonic()
+ while True:
+ self.log.info("Starting to wait for offset: %r from ksr queue", sent_offset)
+ offset = self.ksr.queue.get()
+ if offset == sent_offset:
+ self.log.info(
+ "We've consumed back produced offset: %r message back, everything is in sync, took: %.4f", offset,
+ time.monotonic() - start_time
+ )
+ break
+            else:
+ self.log.error("Put the offset: %r back to queue, someone else is waiting for this?", offset)
+ self.ksr.queue.put(offset)
+
+ def send_kafka_message(self, key, value):
+ if isinstance(key, str):
+ key = key.encode("utf8")
+ if isinstance(value, str):
+ value = value.encode("utf8")
+
+ future = self.producer.send(self.config["topic_name"], key=key, value=value)
+ self.producer.flush(timeout=self.kafka_timeout)
+ msg = future.get(self.kafka_timeout)
+ self.log.warning("Sent kafka msg key: %r, value: %r, offset: %r", key, value, msg.offset)
+ self.get_offset_from_queue(msg.offset)
+ return future
+
+ def send_schema_message(self, subject, parsed_schema_json, schema_id, version, deleted):
+ key = f'{{"subject":"{subject}","version":{version},"magic":1,"keytype":"SCHEMA"}}'
+ value = {
+ "subject": subject,
+ "version": version,
+ "id": schema_id,
+ "schema": json_encode(parsed_schema_json, compact=True),
+ "deleted": deleted
+ }
+ return self.send_kafka_message(key, json_encode(value, compact=True))
+
+ def send_config_message(self, compatibility_level, subject=None):
+ if subject is not None:
+ key = '{{"subject":"{}","magic":0,"keytype":"CONFIG"}}'.format(subject)
+ else:
+ key = '{"subject":null,"magic":0,"keytype":"CONFIG"}'
+ value = f'{{"compatibilityLevel":"{compatibility_level}"}}'
+ return self.send_kafka_message(key, value)
+
+ def send_delete_subject_message(self, subject):
+ key = f'{{"subject":"{subject}","magic":0,"keytype":"DELETE_SUBJECT"}}'
+ value = f'{{"subject":"{subject}","version":2}}'
+ return self.send_kafka_message(key, value)
+
+ async def compatibility_check(self, *, subject, version, request):
+ """Check for schema compatibility"""
+ body = request.json
+ self.log.info("Got request to check subject: %r, version_id: %r compatibility", subject, version)
+ old = await self.subject_version_get(subject=subject, version=version, return_dict=True)
+ self.log.info("Existing schema: %r, new_schema: %r", old["schema"], body["schema"])
+ try:
+ new = avro.schema.Parse(body["schema"])
+ except avro.schema.SchemaParseException:
+ self.log.warning("Invalid schema: %r", body["schema"])
+ self.r(body={"error_code": 44201, "message": "Invalid Avro schema"}, status=422)
+ try:
+ old_schema = avro.schema.Parse(json.loads(old["schema"]))
+ except avro.schema.SchemaParseException:
+ self.log.warning("Invalid existing schema: %r", old["schema"])
+ self.r(body={"error_code": 44201, "message": "Invalid Avro schema"}, status=422)
+
+ compat = Compatibility(
+ source=old_schema,
+ target=new,
+ compatibility=old.get("compatibility", self.ksr.config["compatibility"])
+ )
+ try:
+ compat.check()
+ except IncompatibleSchema as ex:
+ self.log.warning("Invalid schema %s found by compatibility check: old: %s new: %s", ex, old_schema, new)
+ self.r({"is_compatible": False})
+ self.r({"is_compatible": True})
+
+ async def schemas_get(self, *, schema_id):
+ schema = self.ksr.schemas.get(int(schema_id))
+ if not schema:
+ self.log.warning("Schema: %r that was requested, not found", int(schema_id))
+ self.r(body={"error_code": 40403, "message": "Schema not found"}, status=404)
+ self.r({"schema": schema})
+
+ async def config_get(self):
+ self.r({"compatibilityLevel": self.ksr.config["compatibility"]})
+
+ async def config_set(self, *, request):
+ if "compatibility" in request.json and request.json["compatibility"] in COMPATIBILITY_MODES:
+ compatibility_level = request.json["compatibility"]
+ self.send_config_message(compatibility_level=compatibility_level, subject=None)
+ else:
+ self.r(
+ body={
+ "error_code": 42203,
+ "message": "Invalid compatibility level. Valid values are none, backward, forward and full"
+ },
+ status=422
+ )
+ self.r({"compatibility": self.ksr.config["compatibility"]})
+
+ async def config_subject_get(self, *, subject):
+ subject_data = self.ksr.subjects.get(subject)
+ if not subject_data:
+ self.r({"error_code": 40401, "message": "no subject"}, status=404)
+
+ if "compatibility" in subject_data:
+ self.r({"compatibilityLevel": subject_data["compatibility"]})
+
+ self.r({"error_code": 40401, "message": "Subject not found."}, status=404)
+
+ async def config_subject_set(self, *, request, subject):
+ subject_data = self.ksr.subjects.get(subject)
+ if not subject_data:
+ self.r({"error_code": 40401, "message": "no subject"}, status=404)
+
+ if "compatibility" in request.json and request.json["compatibility"] in COMPATIBILITY_MODES:
+ self.send_config_message(compatibility_level=request.json["compatibility"], subject=subject)
+ else:
+ self.r(body={"error_code": 42203, "message": "Invalid compatibility level"}, status=422)
+
+ self.r({"compatibility": request.json["compatibility"]})
+
+ async def subjects_list(self):
+ self.r(list(self.ksr.subjects.keys()))
+
+ async def subject_delete(self, *, subject):
+ subject_data = self.ksr.subjects.get(subject, {})
+ if not subject_data:
+ self.r({"error_code": 40401, "message": "subject does not exist"}, status=404)
+
+ self.send_delete_subject_message(subject)
+ self.r(list(subject_data["schemas"]), status=200)
+
+ async def subject_version_get(self, *, subject, version, return_dict=False):
+ if version != "latest" and int(version) < 1:
+ self.r({
+ "error_code": 42202,
+ "message": 'The specified version is not a valid version id. '
+ 'Allowed values are between [1, 2^31-1] and the string "latest"'
+ },
+ status=422)
+
+ subject_data = self.ksr.subjects.get(subject)
+ if not subject_data:
+ self.r({"error_code": 40401, "message": "no subject"}, status=404)
+
+ max_version = max(subject_data["schemas"])
+ if version == "latest":
+ schema = subject_data["schemas"][max(subject_data["schemas"])]
+ version = max(subject_data["schemas"])
+ elif int(version) <= max_version:
+ schema = subject_data["schemas"].get(int(version))
+ else:
+ self.r({"error_code": 40402, "message": "Version not found."}, status=404)
+
+ schema_string = schema["schema"]
+ schema_id = schema["id"]
+ ret = {
+ "subject": subject,
+ "version": int(version),
+ "id": schema_id,
+ "schema": json_encode(schema_string, compact=True)
+ }
+ if return_dict:
+ # Return also compatibility information to compatibility check
+ if subject_data.get("compatibility"):
+ ret["compatibility"] = subject_data.get("compatibility")
+ return ret
+ self.r(ret)
+
+ async def subject_version_delete(self, *, subject, version):
+ version = int(version)
+ subject_data = self.ksr.subjects.get(subject)
+ if not subject_data:
+ self.r({"error_code": 40401, "message": "subject not found"}, status=404)
+
+ schema = subject_data["schemas"].get(version, None)
+ if not schema:
+ self.r({"error_code": 40402, "message": "Version not found."}, status=404)
+ schema_id = schema["id"]
+ self.send_schema_message(subject, schema, schema_id, version, deleted=True)
+ self.r(str(version), status=200)
+
+ async def subject_versions_list(self, *, subject):
+ subject_data = self.ksr.subjects.get(subject)
+ if not subject_data:
+ self.r({"error_code": 40401, "message": "subject not found"}, status=404)
+
+ self.r(list(subject_data["schemas"]), status=200)
+
+ async def get_master(self):
+ async with self.master_lock:
+ while True:
+ master, master_url = self.mc.get_master_info()
+ if master is None:
+ self.log.info("No master set: %r, url: %r", master, master_url)
+ elif self.ksr.ready is False:
+ self.log.info("Schema reader isn't ready yet: %r", self.ksr.ready)
+ else:
+ return master, master_url
+ await asyncio.sleep(1.0)
+
+ async def subject_post(self, *, subject, request):
+ body = request.json
+ self.log.debug("POST with subject: %r, request: %r", subject, body)
+ are_we_master, master_url = await self.get_master()
+ if are_we_master:
+ self.write_new_schema_local(subject, body)
+ elif are_we_master is None:
+ self.r({"error_code": 50003, "message": "Error while forwarding the request to the master."}, status=500)
+ else:
+ await self.write_new_schema_remote(subject, body, master_url)
+
+ def write_new_schema_local(self, subject, body):
+ """Since we're the master we get to write the new schema"""
+ self.log.info("Writing new schema locally since we're the master")
+ try:
+ new_schema = avro.schema.Parse(body["schema"])
+ except avro.schema.SchemaParseException:
+ self.log.warning("Invalid schema: %r", body["schema"])
+ self.r(body={"error_code": 44201, "message": "Invalid Avro schema"}, status=422)
+
+ if subject not in self.ksr.subjects:
+ schema_id = self.ksr.get_new_schema_id()
+ version = 1
+ self.log.info(
+ "Registering new subject: %r with version: %r to schema %r, schema_id: %r", subject, version,
+ new_schema.to_json(), schema_id
+ )
+ else:
+ # First check if any of the existing schemas for the subject match
+ subject_data = self.ksr.subjects[subject]
+            schema_versions = sorted(subject_data["schemas"])
+ # Go through these in version order
+ for version in schema_versions:
+ schema = subject_data["schemas"][version]
+ parsed_version_schema = avro.schema.Parse(schema["schema"])
+ if parsed_version_schema == new_schema:
+ self.r({"id": schema["id"]})
+ else:
+ self.log.debug("schema: %s did not match with: %s", schema["schema"], new_schema.to_json())
+
+ # Run a compatibility check between the newest on file schema and the one being submitted now
+ latest_schema = subject_data["schemas"][schema_versions[-1]]
+ old_schema = avro.schema.Parse(latest_schema["schema"])
+ compat = Compatibility(
+ old_schema,
+ new_schema,
+ compatibility=subject_data.get("compatibility", self.ksr.config["compatibility"])
+ )
+ try:
+ compat.check()
+ except IncompatibleSchema as ex:
+ self.log.warning("Incompatible schema: %s", ex)
+ self.r(
+ body={
+ "error_code": 409,
+ "message": "Schema being registered is incompatible with an earlier schema"
+ },
+ status=409
+ )
+
+ # We didn't find an existing schema, so go and create one
+ schema_id = self.ksr.get_new_schema_id()
+ version = max(self.ksr.subjects[subject]["schemas"]) + 1
+        self.log.info(
+            "Registering subject: %r, new version: %r with schema %r, schema_id: %r", subject, version,
+            new_schema.to_json(), schema_id
+        )
+ self.send_schema_message(subject, new_schema.to_json(), schema_id, version, deleted=False)
+ self.r({"id": schema_id})
+
+ async def write_new_schema_remote(self, subject, body, master_url):
+ self.log.info("Writing new schema to remote url: %r since we're not the master", master_url)
+ response = await self.http_request(
+ url="{}/subjects/{}/versions".format(master_url, subject), method="POST", json=body, timeout=60.0
+ )
+ self.r(body=response.body, status=response.status)
+
+
+def main():
+ if len(sys.argv) != 2:
+ print("Not enough arguments, given - usage schema config.json")
+ return 1
+
+ config_path = sys.argv[1]
+ if not os.path.exists(config_path):
+ print("Config file: {} does not exist, exiting".format(config_path))
+ return 1
+ kc = Karapace(config_path)
+ return kc.run(host=kc.config["host"], port=kc.config["port"])
+
+
+if __name__ == "__main__":
+ sys.exit(main())
diff --git a/karapace/master_coordinator.py b/karapace/master_coordinator.py
new file mode 100644
index 000000000..0f3b5bfa4
--- /dev/null
+++ b/karapace/master_coordinator.py
@@ -0,0 +1,177 @@
+"""
+karapace - master coordinator
+
+Copyright (c) 2019 Aiven Ltd
+See LICENSE for details
+"""
+import json
+import logging
+import time
+from threading import Lock, Thread
+
+from kafka.client_async import KafkaClient
+from kafka.coordinator.base import BaseCoordinator
+from kafka.errors import NoBrokersAvailable, NodeNotReadyError
+from kafka.metrics import MetricConfig, Metrics
+
+# SR group errors
+NO_ERROR = 0
+DUPLICATE_URLS = 1
+
+
+def get_identity_url(scheme, host, port):
+ return f"{scheme}://{host}:{port}"
+
+
+class SchemaCoordinator(BaseCoordinator):
+ hostname = None
+ port = None
+ scheme = None
+ master = None
+ master_url = None
+ log = logging.getLogger("SchemaCoordinator")
+
+ def protocol_type(self):
+ return "sr"
+
+ @staticmethod
+ def get_identity(*, host, port, scheme, json_encode=True):
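+        # The identity is what each member advertises when joining the group,
+        # e.g. (hypothetical values):
+        #   {"version": 1, "host": "10.0.0.1", "port": 8081, "scheme": "http", "master_eligibility": true}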
+ res = {"version": 1, "host": host, "port": port, "scheme": scheme, "master_eligibility": True}
+ if json_encode:
+ return json.dumps(res)
+ return res
+
+ def group_protocols(self):
+ return [("v0", self.get_identity(host=self.hostname, port=self.port, scheme=self.scheme))]
+
+ def _perform_assignment(self, leader_id, protocol, members):
+ self.log.info("Creating assignment: %r, protocol: %r, members: %r", leader_id, protocol, members)
+ self.master = None
+ error = NO_ERROR
+ urls = {}
+ for member_id, member_data in members:
+ member_identity = json.loads(member_data.decode("utf8"))
+ if member_identity["master_eligibility"] is True:
+ urls[get_identity_url(member_identity["scheme"], member_identity["host"],
+ member_identity["port"])] = (member_id, member_data)
+
+ lowest_url = sorted(urls)[0]
+ schema_master_id, member_data = urls[lowest_url]
+ member_identity = json.loads(member_data.decode("utf8"))
+ identity = self.get_identity(
+ host=member_identity["host"],
+ port=member_identity["port"],
+ scheme=member_identity["scheme"],
+ json_encode=False,
+ )
+ self.log.info("Chose: %r with url: %r as the master", schema_master_id, lowest_url)
+ self.master_url = lowest_url
+
+ assignments = {}
+ for member_id, member_data in members:
+ assignments[member_id] = json.dumps({"master": schema_master_id, "master_identity": identity, "error": error})
+ return assignments
+
+ def _on_join_prepare(self, generation, member_id):
+ """Invoked prior to each group join or rejoin."""
+ # needs to be implemented in our class for pylint to be satisfied
+
+ def _on_join_complete(self, generation, member_id, protocol, member_assignment_bytes):
+ self.log.info(
+ "Join complete, generation %r, member_id: %r, protocol: %r, member_assignment_bytes: %r", generation, member_id,
+ protocol, member_assignment_bytes
+ )
+ member_assignment = json.loads(member_assignment_bytes.decode("utf8"))
+ member_identity = member_assignment["master_identity"]
+
+ master_url = get_identity_url(
+ scheme=member_identity["scheme"],
+ host=member_identity["host"],
+ port=member_identity["port"],
+ )
+ if member_assignment["master"] == member_id:
+ self.master_url = master_url
+ self.master = True
+ else:
+ self.master_url = master_url
+ self.master = False
+        return super()._on_join_complete(generation, member_id, protocol, member_assignment_bytes)
+
+ def _on_join_follower(self):
+ self.log.info("We are a follower, not a master")
+        return super()._on_join_follower()
+
+
+class MasterCoordinator(Thread):
+ """Handles schema topic creation and master election"""
+
+ def __init__(self, config):
+ Thread.__init__(self)
+ self.config = config
+ self.api_version_auto_timeout_ms = 30000
+ self.timeout_ms = 10000
+ self.kafka_client = None
+ self.running = True
+ self.sc = None
+ metrics_tags = {"client-id": self.config["client_id"]}
+ metric_config = MetricConfig(samples=2, time_window_ms=30000, tags=metrics_tags)
+ self._metrics = Metrics(metric_config, reporters=[])
+ self.lock = Lock()
+ self.lock.acquire()
+ self.log = logging.getLogger("MasterCoordinator")
+
+ def init_kafka_client(self):
+ try:
+ self.kafka_client = KafkaClient(
+ api_version_auto_timeout_ms=self.api_version_auto_timeout_ms,
+ bootstrap_servers=self.config["bootstrap_uri"],
+ client_id=self.config["client_id"],
+ security_protocol=self.config["security_protocol"],
+ ssl_cafile=self.config["ssl_cafile"],
+ ssl_certfile=self.config["ssl_certfile"],
+ ssl_keyfile=self.config["ssl_keyfile"],
+ )
+ return True
+ except (NodeNotReadyError, NoBrokersAvailable):
+ self.log.warning("No Brokers available yet, retrying init_kafka_client()")
+ time.sleep(2.0)
+ return False
+
+ def init_schema_coordinator(self):
+ self.sc = SchemaCoordinator(
+ client=self.kafka_client,
+ metrics=self._metrics,
+ group_id=self.config["group_id"],
+ )
+ self.sc.hostname = self.config["advertised_hostname"]
+ self.sc.port = self.config["port"]
+ self.sc.scheme = "http"
+ self.lock.release() # self.sc now exists, we get to release the lock
+
+ def get_master_info(self):
+ """Return whether we're the master, and the actual master url that can be used if we're not"""
+ with self.lock:
+ return self.sc.master, self.sc.master_url
+
+ def close(self):
+ self.log.info("Closing master_coordinator")
+ self.running = False
+ if self.kafka_client:
+ self.kafka_client.close()
+
+ def run(self):
+ while self.running:
+ try:
+ if not self.kafka_client:
+ if self.init_kafka_client() is False:
+ continue
+ if not self.sc:
+ self.init_schema_coordinator()
+
+ self.sc.ensure_active_group()
+ self.log.info("We're master: %r: master_uri: %r", self.sc.master, self.sc.master_url)
+ time.sleep(5.0)
+ except: # pylint: disable=bare-except
+ self.log.exception("Exception in master_coordinator")
+ time.sleep(1.0)
+ self.close()
diff --git a/karapace/rapu.py b/karapace/rapu.py
new file mode 100644
index 000000000..21e5e05d3
--- /dev/null
+++ b/karapace/rapu.py
@@ -0,0 +1,306 @@
+"""
+karapace - rapu
+
+Custom middleware system on top of `aiohttp` implementing HTTP server and
+client components for use in Aiven's REST applications.
+
+Copyright (c) 2019 Aiven Ltd
+See LICENSE for details
+"""
+import asyncio
+import hashlib
+import json as jsonlib
+import logging
+import re
+import ssl
+import time
+
+import aiohttp
+import aiohttp.web
+import aiosocksy
+import aiosocksy.connector
+import async_timeout
+from karapace.statsd import statsd_client
+from karapace.utils import json_encode
+from karapace.version import __version__
+
+
+SERVER_NAME = f"Karapace/{__version__}"
+
+
+class HTTPRequest:
+ def __init__(self, *, url, query, headers, path_for_stats, method):
+ self.url = url
+ self.headers = headers
+ self.query = query
+ self.path_for_stats = path_for_stats
+ self.method = method
+ self.json = None
+
+
+class HTTPResponse(Exception):
+ """A custom Response object derived from Exception so it can be raised
+ in response handler callbacks."""
+
+ def __init__(self, body, *, status=200, content_type=None, headers=None):
+ self.body = body
+ self.status = status
+ self.headers = dict(headers) if headers else {}
+ if isinstance(body, (dict, list)):
+ self.headers["Content-Type"] = "application/json"
+ self.json = body
+ else:
+ self.json = None
+ if content_type:
+ self.headers["Content-Type"] = content_type
+ super().__init__("HTTPResponse {}".format(status))
+
+ def ok(self):
+ if self.status < 200 or self.status >= 300:
+ return False
+ return True
+
+
+class RestApp:
+ def __init__(self, *, app_name):
+ self.app_name = app_name
+ self.app_request_metric = "{}_request".format(app_name)
+ self.app = aiohttp.web.Application()
+ self.app.on_startup.append(self.create_http_client)
+ self.app.on_cleanup.append(self.cleanup_http_client)
+ self.http_client_v = None
+ self.http_client_no_v = None
+ self.log = logging.getLogger(self.app_name)
+ self.stats = self._get_stats_client()
+ self.app.on_cleanup.append(self.cleanup_stats_client)
+
+ def _get_stats_client(self):
+ return statsd_client(app=self.app_name)
+
+ async def cleanup_stats_client(self, app): # pylint: disable=unused-argument
+ self.stats.close()
+
+ async def create_http_client(self, app): # pylint: disable=unused-argument
+ no_v_conn = aiohttp.TCPConnector(verify_ssl=False)
+ self.http_client_no_v = aiohttp.ClientSession(connector=no_v_conn, headers={"User-Agent": SERVER_NAME})
+ self.http_client_v = aiohttp.ClientSession(headers={"User-Agent": SERVER_NAME})
+
+ async def cleanup_http_client(self, app): # pylint: disable=unused-argument
+ if self.http_client_no_v:
+ await self.http_client_no_v.close()
+ if self.http_client_v:
+ await self.http_client_v.close()
+
+ @staticmethod
+ def cors_and_server_headers_for_request(*, request, origin="*"): # pylint: disable=unused-argument
+ return {
+ "Access-Control-Allow-Origin": origin,
+ "Access-Control-Allow-Methods": "DELETE, GET, OPTIONS, POST, PUT",
+ "Access-Control-Allow-Headers": "Authorization, Content-Type",
+ "Server": SERVER_NAME,
+ }
+
+ async def _handle_request(self, *, request, path_for_stats, callback, callback_with_request=False, json_request=False):
+ start_time = time.monotonic()
+ resp = None
+ rapu_request = HTTPRequest(
+ headers=request.headers,
+ query=request.query,
+ method=request.method,
+ url=request.url,
+ path_for_stats=path_for_stats,
+ )
+ try:
+ if request.method == "OPTIONS":
+ origin = request.headers.get("Origin")
+ if not origin:
+ raise HTTPResponse(body="OPTIONS missing Origin", status=400)
+ headers = self.cors_and_server_headers_for_request(request=rapu_request, origin=origin)
+ raise HTTPResponse(body=b"", status=200, headers=headers)
+
+ body = await request.read()
+ if json_request:
+ if not body:
+ raise HTTPResponse(body="Missing request JSON body", status=400)
+ if request.charset and request.charset.lower() != "utf-8" and request.charset.lower() != "utf8":
+ raise HTTPResponse(body="Request character set must be UTF-8", status=400)
+ try:
+ body_string = body.decode("utf-8")
+ rapu_request.json = jsonlib.loads(body_string)
+ except jsonlib.decoder.JSONDecodeError:
+ raise HTTPResponse(body="Invalid request JSON body", status=400)
+ except UnicodeDecodeError:
+ raise HTTPResponse(body="Request body is not valid UTF-8", status=400)
+ else:
+ if body not in {b"", b"{}"}:
+ raise HTTPResponse(body="No request body allowed for this operation", status=400)
+
+ callback_kwargs = dict(request.match_info)
+ if callback_with_request:
+ callback_kwargs["request"] = rapu_request
+
+ try:
+ data = await callback(**callback_kwargs)
+ status = 200
+ headers = {}
+ except HTTPResponse as ex:
+ data = ex.body
+ status = ex.status
+ headers = ex.headers
+ headers.update(self.cors_and_server_headers_for_request(request=rapu_request))
+
+ if isinstance(data, (dict, list)):
+ resp_bytes = json_encode(data, binary=True, sort_keys=True, compact=True)
+ elif isinstance(data, str):
+ if "Content-Type" not in headers:
+ headers["Content-Type"] = "text/plain; charset=utf-8"
+ resp_bytes = data.encode("utf-8")
+ else:
+ resp_bytes = data
+
+            # On 204 - NO CONTENT there is no point in calculating cache headers
+            if 200 <= status <= 299:
+ if resp_bytes:
+ etag = '"{}"'.format(hashlib.md5(resp_bytes).hexdigest())
+ else:
+ etag = '""'
+ if_none_match = request.headers.get("if-none-match")
+ if if_none_match and if_none_match.replace("W/", "") == etag:
+ status = 304
+ resp_bytes = b""
+
+ headers["access-control-expose-headers"] = "etag"
+ headers["etag"] = etag
+
+ resp = aiohttp.web.Response(body=resp_bytes, status=status, headers=headers)
+ except HTTPResponse as ex:
+ if isinstance(ex.body, str):
+ resp = aiohttp.web.Response(text=ex.body, status=ex.status, headers=ex.headers)
+ else:
+ resp = aiohttp.web.Response(body=ex.body, status=ex.status, headers=ex.headers)
+ except asyncio.CancelledError:
+ self.log.debug("Client closed connection")
+ raise
+ except Exception as ex: # pylint: disable=broad-except
+ self.stats.unexpected_exception(ex=ex, where="rapu_wrapped_callback")
+ self.log.exception("Unexpected error handling user request: %s %s", request.method, request.url)
+ resp = aiohttp.web.Response(text="Internal Server Error", status=500)
+ finally:
+ self.stats.timing(
+ self.app_request_metric,
+ time.monotonic() - start_time,
+ tags={
+ "path": path_for_stats,
+ # no `resp` means that we had a failure in exception handler
+ "result": resp.status if resp else 0,
+ "method": request.method,
+ }
+ )
+
+ return resp
+
+ def route(self, path, *, callback, method, with_request=None, json_body=None):
+ # pretty path for statsd reporting
+ path_for_stats = re.sub(r"<[\w:]+>", "x", path)
+
+ # bottle compatible routing
+ aio_route = path
+ aio_route = re.sub(r"<(\w+):path>", r"{\1:.+}", aio_route)
+ aio_route = re.sub(r"<(\w+)>", r"{\1}", aio_route)
+
+ if (method in {"POST", "PUT"}) and with_request is None:
+ with_request = True
+
+ if with_request and json_body is None:
+ json_body = True
+
+ async def wrapped_callback(request):
+ return await self._handle_request(
+ request=request,
+ path_for_stats=path_for_stats,
+ callback=callback,
+ callback_with_request=with_request,
+ json_request=json_body,
+ )
+
+ async def wrapped_cors(request):
+ return await self._handle_request(
+ request=request,
+ path_for_stats=path_for_stats,
+ callback=None,
+ )
+
+ self.app.router.add_route(method, aio_route, wrapped_callback)
+ try:
+ self.app.router.add_route("OPTIONS", aio_route, wrapped_cors)
+ except RuntimeError as ex:
+ if "Added route will never be executed, method OPTIONS is already registered" not in str(ex):
+ raise
+
+ async def http_request(
+ self,
+ url,
+ *,
+ method="GET",
+ json=None,
+ timeout=10.0,
+ verify=True,
+ proxy=None,
+ ):
+ close_session = False
+ proxy_auth = None
+ proxy_url = None
+
+ if isinstance(verify, str):
+ sslcontext = ssl.create_default_context(cadata=verify)
+ else:
+ sslcontext = None
+
+ if proxy:
+ proxy_auth = aiosocksy.Socks5Auth(proxy["username"], proxy["password"])
+ connector = aiosocksy.connector.ProxyConnector(
+ remote_resolve=False,
+ verify_ssl=verify,
+ ssl_context=sslcontext,
+ )
+ session = aiohttp.ClientSession(
+ connector=connector,
+ request_class=aiosocksy.connector.ProxyClientRequest,
+ )
+ proxy_url = "socks5://{host}:{port}".format_map(proxy)
+ close_session = True
+ elif sslcontext:
+ conn = aiohttp.TCPConnector(ssl_context=sslcontext)
+ session = aiohttp.ClientSession(connector=conn)
+ close_session = True
+ elif verify is True:
+ session = self.http_client_v
+ elif verify is False:
+ session = self.http_client_no_v
+ else:
+ raise ValueError("invalid arguments to http_request")
+
+ func = getattr(session, method.lower())
+ try:
+ with async_timeout.timeout(timeout):
+ async with func(url, json=json, proxy=proxy_url, proxy_auth=proxy_auth) as response:
+ if response.headers.get("content-type", "").startswith("application/json"):
+ resp_content = await response.json()
+ else:
+ resp_content = await response.text()
+ result = HTTPResponse(body=resp_content, status=response.status)
+ finally:
+ if close_session:
+ await session.close()
+
+ return result
+
+ def run(self, *, host, port):
+ aiohttp.web.run_app(
+ app=self.app,
+ host=host,
+ port=port,
+ access_log_format='%Tfs %{x-client-ip}i "%r" %s "%{user-agent}i" response=%bb request_body=%{content-length}ib',
+ )
+
+ def add_routes(self):
+ pass # Override in sub-classes
diff --git a/karapace/schema_reader.py b/karapace/schema_reader.py
new file mode 100644
index 000000000..aa4e0d44d
--- /dev/null
+++ b/karapace/schema_reader.py
@@ -0,0 +1,201 @@
+"""
+karapace - Kafka schema reader
+
+Copyright (c) 2019 Aiven Ltd
+See LICENSE for details
+"""
+import json
+import logging
+import time
+from queue import Queue
+from threading import Thread
+
+from kafka import KafkaConsumer
+from kafka.admin import KafkaAdminClient, NewTopic
+from kafka.errors import (NoBrokersAvailable, NodeNotReadyError, TopicAlreadyExistsError)
+
+
+class KafkaSchemaReader(Thread):
+ def __init__(self, config):
+ Thread.__init__(self)
+ self.log = logging.getLogger("KafkaSchemaReader")
+ self.api_version_auto_timeout_ms = 30000
+ self.topic_creation_timeout_ms = 20000
+ self.timeout_ms = 200
+ self.config = config
+ self.subjects = {}
+ self.schemas = {}
+ self.global_schema_id = 0
+ self.offset = 0
+ self.admin_client = None
+ self.topic_num_partitions = 1
+ self.topic_replication_factor = self.config["replication_factor"]
+ self.consumer = None
+ self.queue = Queue()
+ self.ready = False
+ self.running = True
+
+ def init_consumer(self):
+ # Group not set on purpose, all consumers read the same data
+ self.consumer = KafkaConsumer(
+ self.config["topic_name"],
+ enable_auto_commit=False,
+ api_version=(1, 0, 0),
+ bootstrap_servers=self.config["bootstrap_uri"],
+ client_id=self.config["client_id"],
+ security_protocol=self.config["security_protocol"],
+ ssl_cafile=self.config["ssl_cafile"],
+ ssl_certfile=self.config["ssl_certfile"],
+ ssl_keyfile=self.config["ssl_keyfile"],
+ auto_offset_reset="earliest",
+ )
+
+ def init_admin_client(self):
+ try:
+ self.admin_client = KafkaAdminClient(
+ api_version_auto_timeout_ms=self.api_version_auto_timeout_ms,
+ bootstrap_servers=self.config["bootstrap_uri"],
+ client_id=self.config["client_id"],
+ security_protocol=self.config["security_protocol"],
+ ssl_cafile=self.config["ssl_cafile"],
+ ssl_certfile=self.config["ssl_certfile"],
+ ssl_keyfile=self.config["ssl_keyfile"],
+ )
+ return True
+ except (NodeNotReadyError, NoBrokersAvailable):
+ self.log.warning("No Brokers available yet, retrying init_admin_client()")
+ time.sleep(2.0)
+ return False
+
+ def create_schema_topic(self):
+ schema_topic = NewTopic(
+ name=self.config["topic_name"],
+ num_partitions=self.topic_num_partitions,
+ replication_factor=self.config["replication_factor"],
+ topic_configs={"cleanup.policy": "compact"}
+ )
+ try:
+ self.log.info("Creating topic: %r", schema_topic)
+ self.admin_client.create_topics([schema_topic], timeout_ms=self.topic_creation_timeout_ms)
+ self.log.info("Topic: %r created successfully", self.config["topic_name"])
+ return True
+ except TopicAlreadyExistsError:
+ self.log.warning("Topic: %r already exists", self.config["topic_name"])
+ return True
+
+ def get_new_schema_id(self):
+ self.global_schema_id += 1
+ return self.global_schema_id
+
+ def close(self):
+ self.log.info("Closing schema_reader")
+ self.running = False
+ if self.admin_client:
+ self.admin_client.close()
+ if self.consumer:
+ self.consumer.close()
+
+ def run(self):
+ while self.running:
+ if not self.admin_client:
+ if self.init_admin_client() is False:
+ continue
+ self.create_schema_topic()
+ if not self.consumer:
+ self.init_consumer()
+ self.handle_messages()
+
+ def handle_messages(self):
+ raw_msgs = self.consumer.poll(timeout_ms=self.timeout_ms)
+ if self.ready is False and raw_msgs == {}:
+ self.ready = True
+
+ for _, msgs in raw_msgs.items():
+ for msg in msgs:
+ try:
+ key = json.loads(msg.key.decode("utf8"))
+ except json.JSONDecodeError:
+ self.log.exception("Invalid JSON in msg.key: %r, value: %r", msg.key, msg.value)
+ continue
+
+ value = None
+ if msg.value:
+ try:
+ value = json.loads(msg.value.decode("utf8"))
+ except json.JSONDecodeError:
+ self.log.exception("Invalid JSON in msg.value: %r, key: %r", msg.value, msg.key)
+ continue
+
+ self.log.info("Read new record: key: %r, value: %r, offset: %r", key, value, msg.offset)
+ self.handle_msg(key, value)
+ self.offset = msg.offset
+ self.log.info("Handled message, current offset: %r", self.offset)
+ if self.ready:
+ self.queue.put(self.offset)
+
+ def handle_msg(self, key, value):
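+        # Each record is dispatched on the "keytype" field of its key, e.g. a SCHEMA
+        # record looks roughly like (hypothetical):
+        #   key:   {"subject": "test", "version": 1, "magic": 1, "keytype": "SCHEMA"}
+        #   value: {"subject": "test", "version": 1, "id": 1, "schema": "...", "deleted": false}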
+ if key["keytype"] == "CONFIG":
+ if "subject" in key and key["subject"] is not None:
+ self.log.info(
+ "Setting subject: %r config to: %r, value: %r", key["subject"], value["compatibilityLevel"], value
+ )
+ subject_data = self.subjects.get(key["subject"])
+ subject_data["compatibility"] = value["compatibilityLevel"]
+ else:
+ self.log.info("Setting global config to: %r, value: %r", value["compatibilityLevel"], value)
+ self.config["compatibility"] = value["compatibilityLevel"]
+ elif key["keytype"] == "SCHEMA":
+ subject = value["subject"]
+ if subject not in self.subjects:
+ self.log.info("Adding first version of subject: %r, value: %r", subject, value)
+ self.subjects[subject] = {
+ "schemas": {
+ value["version"]: {
+ "schema": value["schema"],
+ "version": value["version"],
+ "id": value["id"],
+ }
+ }
+ }
+ self.log.info("Setting schema_id: %r with schema: %r", value["id"], value["schema"])
+ self.schemas[value["id"]] = value["schema"]
+ self.global_schema_id = value["id"]
+ elif value["deleted"] is True:
+ self.log.info("Deleting subject: %r, version: %r", subject, value["version"])
+ entry = self.subjects[subject]["schemas"].pop(value["version"], None)
+ if not entry:
+ self.log.error(
+ "Subject: %r, version: %r, value: %r did not exist, should have.", subject, value["version"], value
+ )
+ else:
+ self.log.info("Deleting schema_id: %r, schema: %r", value["id"], self.schemas.get(value["id"]))
+ entry = self.schemas.pop(value["id"], None)
+ if not entry:
+ self.log.error("Schema: %r did not exist, should have", value["id"])
+ elif value["deleted"] is False:
+ self.log.info("Adding new version of subject: %r, value: %r", subject, value)
+ self.subjects[subject]["schemas"][value["version"]] = {
+ "schema": value["schema"],
+ "version": value["version"],
+ "id": value["id"]
+ }
+ self.log.info("Setting schema_id: %r with schema: %r", value["id"], value["schema"])
+ self.schemas[value["id"]] = value["schema"]
+ self.global_schema_id = value["id"]
+ elif key["keytype"] == "DELETE_SUBJECT":
+ self.log.info("Deleting subject: %r, value: %r", value["subject"], value)
+ subject = self.subjects.pop(value["subject"], None)
+ if not subject:
+ self.log.error("Subject: %r did not exist, should have", value["subject"])
+ else:
+ for schema in subject["schemas"].values():
+ self.log.info(
+ "Deleting subject: %r, schema_id: %r, schema: %r", subject, schema["id"],
+ self.schemas.get(schema["id"])
+ )
+ entry = self.schemas.pop(schema["id"], None)
+ if not entry:
+ self.log.error("Schema: %r did not exist, should have", schema["id"])
+ elif key["keytype"] == "NOOP": # for spec completeness
+ pass
diff --git a/karapace/statsd.py b/karapace/statsd.py
new file mode 100644
index 000000000..2b0199a46
--- /dev/null
+++ b/karapace/statsd.py
@@ -0,0 +1,125 @@
+"""
+karapace - statsd
+
+Supports telegraf's statsd protocol extension for 'key=value' tags:
+
+ https://github.com/influxdata/telegraf/tree/master/plugins/inputs/statsd
+
+Copyright (c) 2019 Aiven Ltd
+See LICENSE for details
+"""
+import datetime
+import logging
+import os
+import socket
+import time
+from contextlib import contextmanager
+
+STATSD_HOST = "127.0.0.1"
+STATSD_PORT = 8125
+
+
+class StatsClient:
+ def __init__(self, host="127.0.0.1", port=8125, tags=None):
+ self.log = logging.getLogger("StatsClient")
+ self.sentry_config = {}
+ self.update_sentry_config({
+ "dsn": os.environ.get("SENTRY_DSN") or None,
+ "tags": tags,
+ "ignore_exceptions": [
+ "ClientConnectorError", # influxdb, aiohttp
+ "ClientPayloadError", # infludb (aiohttp)
+ "ConnectionLoss", # kazoo, zkwrap
+ "ConnectionRefusedError", # mostly kafka (asyncio)
+ "ConnectionResetError", # paramiko, kafka, requests
+ "IncompleteReadError", # kafka (asyncio)
+ "ServerDisconnectedError", # influxdb (aiohttp)
+ "ServerTimeoutError", # influxdb (aiohttp)
+ "TimeoutError", # kafka, redis
+ ]
+ })
+ self._dest_addr = (host, port)
+ self._socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
+ self._tags = tags or {}
+
+ @contextmanager
+ def timing_manager(self, metric, tags=None):
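+        # Usage sketch (hypothetical metric name):
+        #   with stats.timing_manager("karapace_request"):
+        #       handle_request()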
+ start_time = time.monotonic()
+ yield
+ self.timing(metric, time.monotonic() - start_time, tags)
+
+ def update_sentry_config(self, config):
+ new_config = self.sentry_config.copy()
+ new_config.update(config)
+ if new_config == self.sentry_config:
+ return
+
+ self.sentry_config = new_config
+ if self.sentry_config.get("dsn"):
+ try:
+ # Lazy-load raven as this file is loaded by a lot of tools
+ import raven
+ self.raven_client = raven.Client(**self.sentry_config)
+ except ImportError:
+ self.raven_client = None
+ self.log.warning("Cannot enable Sentry.io sending: importing 'raven' failed")
+ else:
+ self.raven_client = None
+
+ def gauge(self, metric, value, tags=None):
+ self._send(metric, b"g", value, tags)
+
+ def increase(self, metric, inc_value=1, tags=None):
+ self._send(metric, b"c", inc_value, tags)
+
+ def timing(self, metric, value, tags=None):
+ self._send(metric, b"ms", value, tags)
+
+ def unexpected_exception(self, ex, where, tags=None, *, elapsed=None):
+ all_tags = {
+ "exception": ex.__class__.__name__,
+ "where": where,
+ }
+ all_tags.update(tags or {})
+ self.increase("exception", tags=all_tags)
+ if self.raven_client:
+ raven_tags = {**(tags or {}), "where": where}
+ self.raven_client.captureException(tags=raven_tags, time_spent=elapsed)
+
+ def _send(self, metric, metric_type, value, tags):
+ if None in self._dest_addr:
+ # stats sending is disabled
+ return
+
+ try:
+ # format: "user.logins,service=payroll,region=us-west:1|c"
+ parts = [metric.encode("utf-8"), b":", str(value).encode("utf-8"), b"|", metric_type]
+ send_tags = self._tags.copy()
+ send_tags.update(tags or {})
+ for tag, tag_value in sorted(send_tags.items()):
+ if tag_value is None:
+ tag_value = ""
+ elif isinstance(tag_value, datetime.datetime):
+ if tag_value.tzinfo:
+ tag_value = tag_value.astimezone(datetime.timezone.utc).replace(tzinfo=None)
+ tag_value = tag_value.isoformat()[:19].replace("-", "").replace(":", "") + "Z"
+ elif isinstance(tag_value, datetime.timedelta):
+ tag_value = "{}s".format(int(tag_value.total_seconds()))
+ elif not isinstance(tag_value, str):
+ tag_value = str(tag_value)
+ if " " in tag_value or ":" in tag_value or "|" in tag_value or "=" in tag_value:
+ tag_value = "INVALID"
+ parts.insert(1, ",{}={}".format(tag, tag_value).encode("utf-8"))
+
+ self._socket.sendto(b"".join(parts), self._dest_addr)
+ except Exception as ex: # pylint: disable=broad-except
+ self.log.error("Unexpected exception in statsd send: %s: %s", ex.__class__.__name__, ex)
+
+ def close(self):
+ self._socket.close()
+
+
+def statsd_client(*, app, tags=None):
+ tags = (tags or {}).copy()
+ tags["app"] = app
+ return StatsClient(host=STATSD_HOST, port=STATSD_PORT, tags=tags)
diff --git a/karapace/utils.py b/karapace/utils.py
new file mode 100644
index 000000000..d5edcb45c
--- /dev/null
+++ b/karapace/utils.py
@@ -0,0 +1,50 @@
+"""
+karapace - utils
+
+Copyright (c) 2019 Aiven Ltd
+See LICENSE for details
+"""
+import datetime
+import decimal
+import json as jsonlib
+import types
+
+
+def isoformat(datetime_obj=None, *, preserve_subsecond=False, compact=False):
+ """Return datetime to ISO 8601 variant suitable for users.
+ Assume UTC for datetime objects without a timezone, always use
+ the Z timezone designator."""
+ if datetime_obj is None:
+ datetime_obj = datetime.datetime.utcnow()
+ elif datetime_obj.tzinfo:
+ datetime_obj = datetime_obj.astimezone(datetime.timezone.utc).replace(tzinfo=None)
+ isof = datetime_obj.isoformat()
+ if not preserve_subsecond:
+ isof = isof[:19]
+ if compact:
+ isof = isof.replace("-", "").replace(":", "").replace(".", "")
+ return isof + "Z"
+
+
+def default_json_serialization(obj):
+ if isinstance(obj, datetime.datetime):
+ return isoformat(obj, preserve_subsecond=True)
+ if isinstance(obj, datetime.timedelta):
+ return obj.total_seconds()
+ if isinstance(obj, decimal.Decimal):
+ return str(obj)
+ if isinstance(obj, types.MappingProxyType):
+ return dict(obj)
+
+ raise TypeError("Object of type {!r} is not JSON serializable".format(obj.__class__.__name__))
+
+
+def json_encode(obj, *, compact=True, sort_keys=True, binary=False):
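+    """Encode obj as JSON using project-wide conventions.
+
+    With the defaults the output is compact and key-sorted, e.g. (hypothetical):
+        json_encode({"b": 1, "a": 2}) == '{"a":2,"b":1}'
+    """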
+ res = jsonlib.dumps(
+ obj,
+ sort_keys=sort_keys if sort_keys is not None else not compact,
+ indent=None if compact else 4,
+ separators=(",", ":") if compact else None,
+ default=default_json_serialization
+ )
+ return res.encode("utf-8") if binary else res
diff --git a/karapace/version.py b/karapace/version.py
new file mode 100644
index 000000000..b09440f15
--- /dev/null
+++ b/karapace/version.py
@@ -0,0 +1,7 @@
+"""
+karapace - version
+
+Copyright (c) 2019 Aiven Ltd
+See LICENSE for details
+"""
+__version__ = "0.0.1-0-unknown-10a2904"
diff --git a/pytest.ini b/pytest.ini
new file mode 100644
index 000000000..1c3dd6f6f
--- /dev/null
+++ b/pytest.ini
@@ -0,0 +1,2 @@
+[pytest]
+addopts = -ra -q --tb=short --showlocals
diff --git a/setup.py b/setup.py
new file mode 100644
index 000000000..6a2b380a4
--- /dev/null
+++ b/setup.py
@@ -0,0 +1,58 @@
+"""
+karapace - setup
+
+Copyright (c) 2019 Aiven Ltd
+See LICENSE for details
+"""
+from setuptools import setup, find_packages
+import os
+import version
+
+
+readme_path = os.path.join(os.path.dirname(__file__), "README.rst")
+with open(readme_path, "r") as fp:
+ readme_text = fp.read()
+
+
+version_for_setup_py = version.get_project_version("karapace/version.py")
+version_for_setup_py = ".dev".join(version_for_setup_py.split("-", 2)[:2])
+
+
+setup(
+ name="karapace",
+ version=version_for_setup_py,
+ zip_safe=False,
+ packages=find_packages(exclude=["test"]),
+ install_requires=[
+ "aiohttp",
+ ],
+ extras_require={},
+ dependency_links=[],
+ package_data={},
+ entry_points={
+ "console_scripts": [
+ "karapace = karapace.karapace:main",
+ ],
+ },
+ author="Hannu Valtonen",
+ author_email="hannu.valtonen@aiven.io",
+ license="AGPL 3",
+ platforms=["POSIX", "MacOS"],
+ description="Karapace",
+ long_description=readme_text,
+ url="https://github.com/aiven/karapace/",
+ classifiers=[
+ "Development Status :: 5 - Production/Stable",
+ "Intended Audience :: Developers",
+ "Intended Audience :: Information Technology",
+ "Intended Audience :: System Administrators",
+ "License :: OSI Approved :: GNU Affero General Public License v3 or later (AGPLv3+)",
+ "Programming Language :: Python :: 3.3",
+ "Programming Language :: Python :: 3.4",
+ "Programming Language :: Python :: 3.5",
+ "Programming Language :: Python :: 3.6",
+ "Programming Language :: Python :: 3.7",
+ "Topic :: Database :: Database Engines/Servers",
+ "Topic :: Software Development :: Libraries",
+ ],
+)
diff --git a/tests/__init__.py b/tests/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/tests/conftest.py b/tests/conftest.py
new file mode 100644
index 000000000..5eb673254
--- /dev/null
+++ b/tests/conftest.py
@@ -0,0 +1,267 @@
+"""
+karapace - conftest
+
+Copyright (c) 2019 Aiven Ltd
+See LICENSE for details
+"""
+import contextlib
+import os
+import random
+import signal
+import socket
+import subprocess
+import time
+
+import pytest
+
+KAFKA_CURRENT_VERSION = "2.1"
+BASEDIR = "kafka_2.12-2.1.0"
+
+
+class Timeout(Exception):
+ pass
+
+
+def port_is_listening(hostname, port, ipv6):
+    if ipv6:
+        s = socket.socket(socket.AF_INET6, socket.SOCK_STREAM, 0)
+    else:
+        s = socket.socket()
+    s.settimeout(0.5)
+    try:
+        s.connect((hostname, port))
+        return True
+    except socket.error:
+        return False
+    finally:
+        s.close()
+
+
+def wait_for_port(port, *, hostname="127.0.0.1", wait_time=20.0, ipv6=False):
+ start_time = time.monotonic()
+ while True:
+ if port_is_listening(hostname, port, ipv6):
+ break
+ elif time.monotonic() - start_time > wait_time:
+ raise Timeout("Timeout waiting for port {} on host {}".format(port, hostname))
+ time.sleep(2.0)
+ print("Port {} on host {} started listening in {} seconds".format(port, hostname, time.monotonic() - start_time))
+
+
+def get_random_port(*, start=3000, stop=30000, blacklist=None):
+ if isinstance(blacklist, int):
+ blacklist = [blacklist]
+
+ while True:
+ value = random.randrange(start, stop)
+ if not blacklist or value not in blacklist:
+ return value
+
+
+@pytest.fixture(scope="session", name="session_tmpdir")
+def fixture_session_tmpdir(tmpdir_factory):
+ """Create a temporary directory object that's usable in the session scope. The returned value is a
+ function which creates a new temporary directory which will be automatically cleaned up upon exit."""
+ tmpdir_obj = tmpdir_factory.mktemp("karapace.sssion.tmpdr.")
+
+ def subdir():
+ return tmpdir_obj.mkdtemp(rootdir=tmpdir_obj)
+
+ try:
+ yield subdir
+ finally:
+ with contextlib.suppress(Exception):
+ tmpdir_obj.remove(rec=1)
+
+
+@pytest.fixture(scope="session", name="zkserver")
+def fixture_zkserver(session_tmpdir):
+ config, proc = zkserver_base(session_tmpdir)
+ try:
+ yield config
+ finally:
+ os.kill(proc.pid, signal.SIGKILL)
+ proc.wait(timeout=10.0)
+
+
+@pytest.fixture(scope="session", name="kafka_server")
+def fixture_kafka_server(session_tmpdir, zkserver):
+ config, proc = kafka_server_base(session_tmpdir, zkserver)
+ try:
+ yield config
+ finally:
+ os.kill(proc.pid, signal.SIGKILL)
+ proc.wait(timeout=10.0)
+
+
+def kafka_java_args(heap_mb, kafka_config_path, logs_dir, log4j_properties_path):
+ java_args = [
+ "-Xmx{}M".format(heap_mb),
+ "-Xms{}M".format(heap_mb),
+ "-Dkafka.logs.dir={}/logs".format(logs_dir),
+ "-Dlog4j.configuration=file:{}".format(log4j_properties_path),
+ "-cp",
+ ":".join([
+ os.path.join(BASEDIR, "libs", "*"),
+ ]),
+ "kafka.Kafka",
+ kafka_config_path,
+ ]
+ return java_args
+
+
+def get_java_process_configuration(java_args):
+ command = [
+ "/usr/bin/java",
+ "-server",
+ "-XX:+UseG1GC",
+ "-XX:MaxGCPauseMillis=20",
+ "-XX:InitiatingHeapOccupancyPercent=35",
+ "-XX:+DisableExplicitGC",
+ "-XX:+ExitOnOutOfMemoryError",
+ "-Djava.awt.headless=true",
+ "-Dcom.sun.management.jmxremote",
+ "-Dcom.sun.management.jmxremote.authenticate=false",
+ "-Dcom.sun.management.jmxremote.ssl=false",
+ ]
+ command.extend(java_args)
+ return command
+
+
+def kafka_server_base(session_tmpdir, zk):
+ datadir = session_tmpdir()
+ blacklist = [zk["admin_port"], zk["client_port"]]
+ plaintext_port = get_random_port(start=15000, blacklist=blacklist)
+
+ config = {
+ "datadir": datadir.join("data").strpath,
+ "kafka_keystore_password": "secret",
+ "kafka_port": plaintext_port,
+ "zookeeper_port": zk["client_port"],
+ }
+
+ os.makedirs(config["datadir"])
+ advertised_listeners = ",".join([
+ f"PLAINTEXT://127.0.0.1:{plaintext_port}",
+ ])
+ listeners = ",".join([
+ f"PLAINTEXT://:{plaintext_port}",
+ ])
+
+ kafka_config = {
+ "broker.id": 1,
+ "broker.rack": "local",
+ "advertised.listeners": advertised_listeners,
+ "auto.create.topics.enable": True,
+ "default.replication.factor": 1,
+ "delete.topic.enable": "true",
+ "inter.broker.listener.name": "PLAINTEXT",
+ "inter.broker.protocol.version": KAFKA_CURRENT_VERSION,
+ "listeners": listeners,
+ "log.cleaner.enable": "true",
+ "log.dirs": config["datadir"],
+ "log.message.format.version": KAFKA_CURRENT_VERSION,
+ "log.retention.check.interval.ms": 300000,
+ "log.segment.bytes": 200 * 1024 * 1024, # 200 MiB
+ "num.io.threads": 8,
+ "num.network.threads": 112,
+ "num.partitions": 1,
+ "num.replica.fetchers": 4,
+ "num.recovery.threads.per.data.dir": 1,
+ "offsets.topic.replication.factor": 1,
+ "socket.receive.buffer.bytes": 100 * 1024,
+ "socket.request.max.bytes": 100 * 1024 * 1024,
+ "socket.send.buffer.bytes": 100 * 1024,
+ "transaction.state.log.min.isr": 1,
+ "transaction.state.log.num.partitions": 16,
+ "transaction.state.log.replication.factor": 1,
+ "zookeeper.connection.timeout.ms": 6000,
+ "zookeeper.connect": "{}:{}".format("127.0.0.1", zk["client_port"])
+ }
+ kafka_config_path = os.path.join(datadir.strpath, "kafka", "config")
+ os.makedirs(kafka_config_path)
+
+ kafka_config_path = os.path.join(kafka_config_path, "server.properties")
+ with open(kafka_config_path, "w") as fp:
+ for key, value in kafka_config.items():
+ fp.write(f"{key}={value}\n")
+
+ log4j_properties_path = os.path.join(BASEDIR, "config/log4j.properties")
+
+ kafka_cmd = get_java_process_configuration(
+ java_args=kafka_java_args(
+ heap_mb=256,
+ logs_dir=os.path.join(datadir.strpath, "kafka"),
+ log4j_properties_path=log4j_properties_path,
+ kafka_config_path=kafka_config_path,
+ ),
+ )
+ proc = subprocess.Popen(kafka_cmd, env={})
+ wait_for_port(config["kafka_port"], wait_time=60)
+ return config, proc
+
+
+def zkserver_base(session_tmpdir, subdir="base"):
+ datadir = session_tmpdir()
+ path = os.path.join(datadir.strpath, subdir)
+ os.makedirs(path)
+ client_port = get_random_port(start=15000)
+ admin_port = get_random_port(start=15000, blacklist=client_port)
+ config = {
+ "client_port": client_port,
+ "admin_port": admin_port,
+ "path": path,
+ }
+ zoo_cfg = """
+# The number of milliseconds of each tick
+tickTime=2000
+# The number of ticks that the initial
+# synchronization phase can take
+initLimit=10
+# The number of ticks that can pass between
+# sending a request and getting an acknowledgement
+syncLimit=5
+# the directory where the snapshot is stored.
+# do not use /tmp for storage, /tmp here is just
+# example sakes.
+dataDir={path}
+# the port at which the clients will connect
+clientPort={client_port}
+#clientPortAddress=127.0.0.1
+# the maximum number of client connections.
+# increase this if you need to handle more clients
+#maxClientCnxns=60
+#
+# Be sure to read the maintenance section of the
+# administrator guide before turning on autopurge.
+#
+# http://zookeeper.apache.org/doc/current/zookeeperAdmin.html#sc_maintenance
+#
+# The number of snapshots to retain in dataDir
+#autopurge.snapRetainCount=3
+# Purge task interval in hours
+# Set to "0" to disable auto purge feature
+#autopurge.purgeInterval=1
+# admin server
+admin.serverPort={admin_port}
+admin.enableServer=false
+# Allow reconfig calls to be made to add/remove nodes to the cluster on the fly
+reconfigEnabled=true
+# Don't require authentication for reconfig
+skipACL=yes
+""".format_map(config)
+ zoo_cfg_path = os.path.join(path, "zoo.cfg")
+ with open(zoo_cfg_path, "w") as fp:
+ fp.write(zoo_cfg)
+ env = {
+ "CLASSPATH": "/usr/share/java/slf4j/slf4j-simple.jar",
+ "ZOO_LOG_DIR": datadir.join("logs").strpath,
+ }
+ java_args = get_java_process_configuration(
+ java_args=[
+ "-cp", ":".join([
+ os.path.join(BASEDIR, "libs", "*"),
+ ]), "org.apache.zookeeper.server.quorum.QuorumPeerMain", zoo_cfg_path
+ ]
+ )
+ proc = subprocess.Popen(java_args, env=env)
+ wait_for_port(config["client_port"])
+ return config, proc
diff --git a/tests/test_master_coordinator.py b/tests/test_master_coordinator.py
new file mode 100644
index 000000000..e5c3aeb56
--- /dev/null
+++ b/tests/test_master_coordinator.py
@@ -0,0 +1,54 @@
+"""
+karapace - master coordination test
+
+Copyright (c) 2019 Aiven Ltd
+See LICENSE for details
+"""
+import time
+
+from karapace.config import set_config_defaults
+from karapace.master_coordinator import MasterCoordinator
+
+
+class Timeout(Exception):
+ pass
+
+
+def init_admin(config):
+ mc = MasterCoordinator(config=config)
+ mc.start()
+ return mc
+
+
+def test_master_selection(kafka_server):
+ config_aa = set_config_defaults({})
+ config_aa["advertised_hostname"] = "127.0.0.1"
+ config_aa["bootstrap_uri"] = "127.0.0.1:{}".format(kafka_server["kafka_port"])
+ config_aa["client_id"] = "aa"
+ config_aa["port"] = 1234
+
+ mc_aa = init_admin(config_aa)
+
+ config_bb = set_config_defaults({})
+ config_bb["advertised_hostname"] = "127.0.0.1"
+ config_bb["bootstrap_uri"] = "127.0.0.1:{}".format(kafka_server["kafka_port"])
+ config_bb["client_id"] = "bb"
+ config_bb["port"] = 5678
+ mc_bb = init_admin(config_bb)
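+ # Poll until both coordinators have a schema coordinator object and the
+ # election has settled on a master before asserting on the outcome.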
+ while True:
+ if not (mc_aa.sc and mc_bb.sc):
+ time.sleep(1.0)
+ continue
+ if not (mc_aa.sc.master or mc_bb.sc.master or mc_aa.sc.master_url or mc_bb.sc.master_url):
+ time.sleep(1.0)
+ continue
+ assert mc_aa.sc.master is True
+ assert mc_bb.sc.master is False
+ master_url = "http://{}:{}".format(config_aa["host"], config_aa["port"])
+ assert mc_aa.sc.master_url == master_url
+ assert mc_bb.sc.master_url == master_url
+ break
+ mc_aa.close()
+ mc_bb.close()
diff --git a/tests/test_schema.py b/tests/test_schema.py
new file mode 100644
index 000000000..7476d6b02
--- /dev/null
+++ b/tests/test_schema.py
@@ -0,0 +1,500 @@
+"""
+karapace - schema tests
+
+Copyright (c) 2019 Aiven Ltd
+See LICENSE for details
+"""
+import json as jsonlib
+import os
+
+import pytest
+
+from karapace.karapace import Karapace
+from .utils import Client
+
+pytest_plugins = "aiohttp.pytest_plugin"
+baseurl = "http://localhost:8081"
+
+
+def create_service(datadir, kafka_server):
+ config_path = os.path.join(datadir, "karapace_config.json")
+ with open(config_path, "w") as fp:
+ karapace_config = {"log_level": "INFO", "bootstrap_uri": "127.0.0.1:{}".format(kafka_server["kafka_port"])}
+ fp.write(jsonlib.dumps(karapace_config))
+ kc = Karapace(config_path)
+ return kc
+
+
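+# Register three successive variants of an enum field under the given
+# compatibility mode; each accepted registration must get a distinct schema id.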
+async def enum_schema_compatibility_checks(c, compatibility):
+ subject = os.urandom(16).hex()
+
+ res = await c.put("config", json={"compatibility": compatibility})
+ assert res.status == 200
+ schema = {
+ "type": "record",
+ "name": "myenumtest",
+ "fields": [{
+ "type": {
+ "type": "enum",
+ "name": "enumtest",
+ "symbols": ["first", "second"],
+ },
+ "name": "faa",
+ }]
+ }
+ res = await c.post(
+ "subjects/{}/versions".format(subject),
+ json={"schema": jsonlib.dumps(schema)},
+ )
+ assert res.status == 200
+ assert "id" in res.json()
+ schema_id = res.json()["id"]
+ schema = {
+ "type": "record",
+ "name": "myenumtest",
+ "fields": [{
+ "type": {
+ "type": "enum",
+ "name": "enumtest",
+ "symbols": ["first", "second", "third"],
+ },
+ "name": "faa",
+ }]
+ }
+ res = await c.post(
+ "subjects/{}/versions".format(subject),
+ json={"schema": jsonlib.dumps(schema)},
+ )
+ assert res.status == 200
+ assert "id" in res.json()
+ schema_id2 = res.json()["id"]
+ assert schema_id != schema_id2
+
+ schema = {
+ "type": "record",
+ "name": "myenumtest",
+ "fields": [{
+ "type": {
+ "type": "enum",
+ "name": "enumtest",
+ "symbols": ["second"],
+ },
+ "name": "faa",
+ }]
+ }
+ res = await c.post(
+ "subjects/{}/versions".format(subject),
+ json={"schema": jsonlib.dumps(schema)},
+ )
+ assert res.status == 200
+ assert "id" in res.json()
+ schema_id3 = res.json()["id"]
+ assert schema_id3 != schema_id2
+
+ res = await c.get("schemas/ids/{}".format(schema_id3))
+ assert res.status_code == 200
+ res = jsonlib.loads(res.json()["schema"])
+ assert res["type"] == "record"
+ assert res["name"] == "myenumtest"
+ assert res["fields"][0]["name"] == "faa"
+ assert res["fields"][0]["type"]["type"] == "enum"
+ assert res["fields"][0]["type"]["name"] == "enumtest"
+ assert res["fields"][0]["type"]["symbols"] == ["second"]
+
+
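+# A type change inside a nested record must be rejected by the compatibility
+# check just like a type change in a top-level field.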
+async def record_nested_schema_compatibility_checks(c):
+ subject = os.urandom(16).hex()
+
+ res = await c.put("config", json={"compatibility": "BACKWARD"})
+ assert res.status == 200
+ schema = {
+ "type": "record",
+ "name": "Objct",
+ "fields": [
+ {
+ "name": "first_name",
+ "type": "string",
+ },
+ {
+ "name": "nested_record_name",
+ "type": {
+ "name": "first_name_record",
+ "type": "record",
+ "fields": [
+ {
+ "name": "first_name",
+ "type": "string",
+ },
+ ],
+ }
+ },
+ ]
+ }
+ res = await c.post(
+ "subjects/{}/versions".format(subject),
+ json={"schema": jsonlib.dumps(schema)},
+ )
+ assert res.status == 200
+ assert "id" in res.json()
+
+ # change string to integer in the nested record, should fail
+ schema["fields"][1]["type"]["fields"][0]["type"] = "int"
+ res = await c.post(
+ "subjects/{}/versions".format(subject),
+ json={"schema": jsonlib.dumps(schema)},
+ )
+ assert res.status == 409
+
+
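+# The compatibility endpoint only reports whether a candidate schema would be
+# accepted; it does not register anything.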
+async def compatibility_endpoint_checks(c):
+ subject = os.urandom(16).hex()
+ schema = {
+ "type": "record",
+ "name": "Objct",
+ "fields": [
+ {
+ "name": "age",
+ "type": "int",
+ },
+ ]
+ }
+
+ res = await c.post(
+ "subjects/{}/versions".format(subject),
+ json={"schema": jsonlib.dumps(schema)},
+ )
+ assert res.status == 200
+
+ res = await c.get("schemas/ids/{}".format(res.json()["id"]))
+ schema_gotten_back = jsonlib.loads(res.json()["schema"])
+ assert schema_gotten_back == schema
+
+ # replace int with long
+ schema["fields"] = [{"type": "long", "name": "age"}]
+ res = await c.post(
+ "compatibility/subjects/{}/versions/latest".format(subject),
+ json={"schema": jsonlib.dumps(schema)},
+ )
+ assert res.status == 200
+ assert res.json() == {"is_compatible": True}
+
+ schema["fields"] = [{"type": "string", "name": "age"}]
+ res = await c.post(
+ "compatibility/subjects/{}/versions/latest".format(subject),
+ json={"schema": jsonlib.dumps(schema)},
+ )
+ assert res.status == 200
+ assert res.json() == {"is_compatible": False}
+
+
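+# FORWARD mode rejects removing a field, BACKWARD mode rejects adding a field
+# that has no default value.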
+async def record_schema_compatibility_checks(c):
+ subject = os.urandom(16).hex()
+
+ res = await c.put("config", json={"compatibility": "FORWARD"})
+ assert res.status == 200
+ schema = {
+ "type": "record",
+ "name": "Objct",
+ "fields": [
+ {
+ "name": "first_name",
+ "type": "string",
+ },
+ ]
+ }
+
+ res = await c.post(
+ "subjects/{}/versions".format(subject),
+ json={"schema": jsonlib.dumps(schema)},
+ )
+ assert res.status == 200
+ assert "id" in res.json()
+ schema_id = res.json()["id"]
+
+ schema2 = {
+ "type": "record",
+ "name": "Objct",
+ "fields": [
+ {
+ "name": "first_name",
+ "type": "string"
+ },
+ {
+ "name": "last_name",
+ "type": "string"
+ },
+ ]
+ }
+ res = await c.post(
+ "subjects/{}/versions".format(subject),
+ json={"schema": jsonlib.dumps(schema2)},
+ )
+ assert res.status == 200
+ assert "id" in res.json()
+ schema_id2 = res.json()["id"]
+ assert schema_id != schema_id2
+
+ schema3 = {
+ "type": "record",
+ "name": "Objct",
+ "fields": [
+ {
+ "name": "last_name",
+ "type": "string"
+ },
+ {
+ "name": "third_name",
+ "type": "string",
+ "default": "foodefaultvalue"
+ },
+ ]
+ }
+ res = await c.post(
+ "subjects/{}/versions".format(subject),
+ json={"schema": jsonlib.dumps(schema3)},
+ )
+ # Fails because field removed
+ assert res.status == 409
+ assert res.json() == {"error_code": 409, "message": "Schema being registered is incompatible with an earlier schema"}
+
+ schema4 = {
+ "type": "record",
+ "name": "Objct",
+ "fields": [
+ {
+ "name": "first_name",
+ "type": "string"
+ },
+ {
+ "name": "last_name",
+ "type": "string"
+ },
+ {
+ "name": "third_name",
+ "type": "string",
+ "default": "foodefaultvalue"
+ },
+ ]
+ }
+ res = await c.post(
+ "subjects/{}/versions".format(subject),
+ json={"schema": jsonlib.dumps(schema4)},
+ )
+ assert res.status == 200
+
+ res = await c.put("config", json={"compatibility": "BACKWARD"})
+ schema5 = {
+ "type": "record",
+ "name": "Objct",
+ "fields": [
+ {
+ "name": "first_name",
+ "type": "string"
+ },
+ {
+ "name": "last_name",
+ "type": "string"
+ },
+ {
+ "name": "third_name",
+ "type": "string",
+ "default": "foodefaultvalue"
+ },
+ {
+ "name": "fourth_name",
+ "type": "string"
+ },
+ ]
+ }
+ res = await c.post(
+ "subjects/{}/versions".format(subject),
+ json={"schema": jsonlib.dumps(schema5)},
+ )
+ assert res.status == 409
+
+ # Add a default value for the field
+ schema5["fields"][3] = {"name": "fourth_name", "type": "string", "default": "foof"}
+ res = await c.post(
+ "subjects/{}/versions".format(subject),
+ json={"schema": jsonlib.dumps(schema5)},
+ )
+ assert res.status == 200
+ assert "id" in res.json()
+
+ # Try to submit schema with a different definition
+ schema5["fields"][3] = {"name": "fourth_name", "type": "int", "default": 2}
+ res = await c.post(
+ "subjects/{}/versions".format(subject),
+ json={"schema": jsonlib.dumps(schema5)},
+ )
+ assert res.status == 409
+
+
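+# Happy path for the subjects and schemas endpoints: registration, id reuse
+# for duplicate schemas, version listing and deletion.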
+async def schema_checks(c):
+ subject = os.urandom(16).hex()
+ res = await c.post(
+ "subjects/{}/versions".format(subject),
+ json={"schema": '{"type": "string"}'},
+ )
+ assert res.status == 200
+ assert "id" in res.json()
+ schema_id = res.json()["id"]
+
+ res = await c.get("schemas/ids/{}".format(res.json()["id"]))
+ assert res.status_code == 200
+ assert res.json()["schema"] == '"string"'
+
+ # repost same schema again to see that a new id is not generated but an old one is given back
+ res = await c.post(
+ "subjects/{}/versions".format(subject),
+ json={"schema": '{"type": "string"}'},
+ )
+ assert res.status == 200
+ assert "id" in res.json()
+ assert schema_id == res.json()["id"]
+
+ # nonexistent schema id
+ result = await c.get(os.path.join("schemas/ids/{}".format(123456789)))
+ assert result.json()["error_code"] == 40403
+
+ res = await c.post(
+ "subjects/{}/versions".format(subject), json={"schema": '{"type": "string", "foo": "string"}'}
+ )
+ assert res.status_code == 200
+ assert "id" in res.json()
+ assert schema_id != res.json()["id"]
+
+ # Fetch the schema back to see how it was mangled
+ result = await c.get("schemas/ids/{}".format(res.json()["id"]))
+ schema = jsonlib.loads(result.json()["schema"])
+ assert schema["type"] == "string"
+ assert schema["foo"] == "string"
+
+ res = await c.get("subjects")
+ assert res.status_code == 200
+ assert subject in res.json()
+
+ res = await c.get("subjects/{}/versions".format(subject))
+ assert res.status_code == 200
+ assert res.json() == [1, 2]
+
+ res = await c.get("subjects/{}/versions/1".format(subject))
+ assert res.status_code == 200
+ assert res.json()["subject"] == subject
+
+ # Find an invalid version 0
+ res = await c.get("subjects/{}/versions/0".format(subject))
+ assert res.status_code == 422
+ assert res.json()["error_code"] == 42202
+ assert res.json()["message"] == \
+ 'The specified version is not a valid version id. Allowed values are between [1, 2^31-1] and the string "latest"'
+
+ # Find an invalid version (too large)
+ res = await c.get("subjects/{}/versions/15".format(subject))
+ assert res.status_code == 404
+ assert res.json()["error_code"] == 40402
+ assert res.json()["message"] == "Version not found."
+
+ # Delete an actual version
+ res = await c.delete("subjects/{}/versions/1".format(subject))
+ assert res.status_code == 200
+ assert res.json() == 1
+
+ # Delete a whole subject
+ res = await c.delete("subjects/{}".format(subject))
+ assert res.status_code == 200
+ assert res.json() == [2]
+
+ # List all subjects, our subject shouldn't be in the list
+ res = await c.get("subjects")
+ assert res.status_code == 200
+ assert subject not in res.json()
+
+
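+# Exercise the global and per-subject /config endpoints.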
+async def config_checks(c):
+ # Tests /config endpoint
+ res = await c.put("config", json={"compatibility": "FULL"})
+ assert res.status_code == 200
+ assert res.json()["compatibility"] == "FULL"
+ assert res.headers["Content-Type"] == "application/vnd.schemaregistry.v1+json"
+
+ res = await c.get("config")
+ assert res.status_code == 200
+ assert res.json()["compatibilityLevel"] == "FULL"
+ assert res.headers["Content-Type"] == "application/vnd.schemaregistry.v1+json"
+
+ res = await c.put("config", json={"compatibility": "NONE"})
+ assert res.status_code == 200
+ assert res.json()["compatibility"] == "NONE"
+ assert res.headers["Content-Type"] == "application/vnd.schemaregistry.v1+json"
+
+ res = await c.put("config", json={"compatibility": "nonexistentmode"})
+ assert res.status_code == 422
+ assert res.json()["error_code"] == 42203
+ assert res.json()["message"] == "Invalid compatibility level. Valid values are none, backward, forward and full"
+ assert res.headers["Content-Type"] == "application/vnd.schemaregistry.v1+json"
+
+ # Create a new subject so we can try setting its config
+ subject = os.urandom(16).hex()
+ res = await c.post(
+ "subjects/{}/versions".format(subject),
+ json={"schema": '{"type": "string"}'},
+ )
+ assert res.status_code == 200
+ assert "id" in res.json()
+
+ res = await c.get("config/{}".format(subject))
+ assert res.status_code == 404
+ assert res.json()["error_code"] == 40401
+ assert res.json()["message"] == "Subject not found."
+
+ res = await c.put("config/{}".format(subject), json={"compatibility": "FULL"})
+ assert res.status_code == 200
+ assert res.json()["compatibility"] == "FULL"
+ assert res.headers["Content-Type"] == "application/vnd.schemaregistry.v1+json"
+
+ res = await c.get("config/{}".format(subject))
+ assert res.status_code == 200
+ assert res.json()["compatibilityLevel"] == "FULL"
+
+
+async def test_local(session_tmpdir, kafka_server, aiohttp_client):
+ datadir = session_tmpdir()
+
+ kc = create_service(datadir, kafka_server)
+ client = await aiohttp_client(kc.app)
+ c = Client(client=client)
+
+ await schema_checks(c)
+ await compatibility_endpoint_checks(c)
+ await record_schema_compatibility_checks(c)
+ await record_nested_schema_compatibility_checks(c)
+ for compatibility in {"FORWARD", "BACKWARD", "FULL"}:
+ await enum_schema_compatibility_checks(c, compatibility)
+ await config_checks(c)
+ kc.close()
+
+
+async def test_remote():
+ server_uri = os.environ.get("SERVER_URI")
+ if not server_uri:
+ pytest.skip("SERVER_URI env variable not set")
+ c = Client(server_uri=server_uri)
+ await schema_checks(c)
+ await compatibility_endpoint_checks(c)
+ await record_schema_compatibility_checks(c)
+ await record_nested_schema_compatibility_checks(c)
+ for compatibility in {"FORWARD", "BACKWARD", "FULL"}:
+ await enum_schema_compatibility_checks(c, compatibility)
+ await config_checks(c)
diff --git a/tests/utils.py b/tests/utils.py
new file mode 100644
index 000000000..c63f4ffc8
--- /dev/null
+++ b/tests/utils.py
@@ -0,0 +1,90 @@
+"""
+karapace - test utils
+
+Copyright (c) 2019 Aiven Ltd
+See LICENSE for details
+"""
+import os
+
+import requests
+
+
+class Result:
+ def __init__(self, status, json_result, headers=None):
+ # We create both status and status_code so people can be agnostic on whichever to use
+ self.status_code = status
+ self.status = status
+ self.json_result = json_result
+ self.headers = headers if headers else {}
+
+ def json(self):
+ return self.json_result
+
+
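+# Runs each request either against an in-process aiohttp test client or, when
+# server_uri is given, against a live server through a requests session.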
+class Client:
+ def __init__(self, server_uri=None, client=None):
+ self.server_uri = server_uri
+ self.session = requests.Session()
+ self.client = client
+
+ async def get(self, path, headers=None):
+ if not headers:
+ headers = {}
+ if self.client:
+ async with self.client.get(
+ path,
+ headers=headers,
+ ) as res:
+ json_result = await res.json()
+ return Result(res.status, json_result, headers=res.headers)
+ elif self.server_uri:
+ res = self.session.get(os.path.join(self.server_uri, path), headers=headers)
+ return Result(status=res.status_code, json_result=res.json(), headers=res.headers)
+
+ async def delete(self, path, headers=None):
+ if not headers:
+ headers = {}
+ if self.client:
+ async with self.client.delete(
+ path,
+ headers=headers,
+ ) as res:
+ json_result = await res.json()
+ return Result(res.status, json_result, headers=res.headers)
+ elif self.server_uri:
+ res = self.session.delete(os.path.join(self.server_uri, path), headers=headers)
+ return Result(status=res.status_code, json_result=res.json(), headers=res.headers)
+
+ async def post(self, path, json, headers=None):
+ if not headers:
+ headers = {"Content-Type": "application/vnd.schemaregistry.v1+json"}
+
+ if self.client:
+ async with self.client.post(
+ path,
+ headers=headers,
+ json=json,
+ ) as res:
+ json_result = await res.json()
+ return Result(res.status, json_result, headers=res.headers)
+ elif self.server_uri:
+ res = self.session.post(os.path.join(self.server_uri, path), headers=headers, json=json)
+ return Result(status=res.status_code, json_result=res.json(), headers=res.headers)
+
+ async def put(self, path, json, headers=None):
+ if not headers:
+ headers = {"Content-Type": "application/vnd.schemaregistry.v1+json"}
+
+ if self.client:
+ async with self.client.put(
+ path,
+ headers=headers,
+ json=json,
+ ) as res:
+ json_result = await res.json()
+ return Result(res.status, json_result, headers=res.headers)
+ elif self.server_uri:
+ res = self.session.put(os.path.join(self.server_uri, path), headers=headers, json=json)
+ return Result(status=res.status_code, json_result=res.json(), headers=res.headers)
diff --git a/version.py b/version.py
new file mode 100644
index 000000000..1c0e17cdf
--- /dev/null
+++ b/version.py
@@ -0,0 +1,63 @@
+"""
+karapace - version
+
+Copyright (c) 2019 Aiven Ltd
+See LICENSE for details
+"""
+import imp
+import os
+import subprocess
+
+
+def save_version(new_ver, old_ver, version_file):
+ if not new_ver:
+ return False
+ version_file = os.path.join(os.path.dirname(__file__), version_file)
+ if not old_ver or new_ver != old_ver:
+ with open(version_file, "w") as fp:
+ fp.write('"""{}"""\n__version__ = "{}"\n'.format(__doc__, new_ver))
+ # A usable new_ver was given, so report success even if the file was already up to date
+ return True
+
+
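+# Resolution order: `git describe`, then short_ver from the Makefile, finally
+# a previously written version file. A successful lookup is written back to
+# the version file so later calls can fall back to it.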
+def get_project_version(version_file):
+ version_file = os.path.join(os.path.dirname(os.path.realpath(__file__)), version_file)
+ try:
+ module = imp.load_source("verfile", version_file)
+ file_ver = module.__version__
+ except IOError:
+ file_ver = None
+
+ os.chdir(os.path.dirname(__file__) or ".")
+ try:
+ git_out = subprocess.check_output(["git", "describe", "--always"],
+ stderr=getattr(subprocess, "DEVNULL", None))
+ except (OSError, subprocess.CalledProcessError):
+ pass
+ else:
+ git_ver = git_out.splitlines()[0].strip().decode("utf-8")
+ if "." not in git_ver:
+ git_ver = "0.0.1-0-unknown-{}".format(git_ver)
+ if save_version(git_ver, file_ver, version_file):
+ return git_ver
+
+ makefile = os.path.join(os.path.dirname(__file__), "Makefile")
+ if os.path.exists(makefile):
+ with open(makefile, "r") as fp:
+ lines = fp.readlines()
+ short_ver = [line.split("=", 1)[1].strip() for line in lines if line.startswith("short_ver")][0]
+ if save_version(short_ver, file_ver, version_file):
+ return short_ver
+
+ if not file_ver:
+ raise Exception("version not available from git or from file {!r}".format(version_file))
+
+ return file_ver
+
+
+if __name__ == "__main__":
+ import sys
+ get_project_version(sys.argv[1])