diff --git a/changelog.md b/changelog.md
index a62d36de4..8502507f5 100644
--- a/changelog.md
+++ b/changelog.md
@@ -49,12 +49,12 @@
 - [fea] Disabled comparators for `sparse`-indexes. From now `sparse`-indexes always use more effective IdSets instead
 - [fea] Added `DeleteMeta` method and simplified meta storing/reading logic
 - [fix] Fixed race condition in the JOINS cache
-- [fix] Fixed results serialization for MERGE-queries with multiple JOINS in cases when some of the JOIN-queries have not joined any items
+- [fix] Fixed results serialization for MERGE-queries with multiple JOINS in cases when some JOIN-queries have not joined any items
 - [fix] Fixed `distinct` result in cases when `WHERE`-condition contains duplicate values (e.g. `distinct(id) WHERE id IN (1,1,1,3)`)

 ## Fulltext
 - [fea] Increased max `merge_limit` value (new values range is `[1, 0x1FFFFFFF]`)
-- [fix] Fixed [phrase search](fulltext.md#phrase-search) behavior for the phrases containig single word
+- [fix] Fixed [phrase search](fulltext.md#phrase-search) behavior for phrases containing a single word

 ## Go connector
 - [fea] Added `EnumMeta` and `DeleteMeta` functions
@@ -70,8 +70,8 @@
 - [fea] Added possibility to delete namespace's Meta data
 - [fea] Added Git documentation link for the Bm25 config
 - [fea] Added explicit null-values in the Grid view
-- [fix] Fixed the inform window that appeared on the Cancel button on the NS Config page
-- [fix] Fixed extra data uploading on the Performace page
+- [fix] Fixed the inform window that appeared on the Cancel button on the NS Config page
+- [fix] Fixed extra data uploading on the Performance page
 - [fix] Changed Git documentation link in the Main menu
 - [fix] Fixed console issues on the add/edit indexes
 - [fix] Fixed caching of the NS config
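To make the `EnumMeta`/`DeleteMeta` entries above concrete — a minimal Go sketch, assuming the v3 module path. The namespace name and the stale key are hypothetical, and the exact signatures of the two new functions are inferred from the changelog entry, not confirmed against the release docs:

```go
package main

import (
    "fmt"
    "log"

    "github.com/restream/reindexer/v3"
    _ "github.com/restream/reindexer/v3/bindings/cproto"
)

func main() {
    db := reindexer.NewReindex("cproto://127.0.0.1:6534/mydb")

    // PutMeta/GetMeta predate this release; EnumMeta/DeleteMeta are the new calls.
    if err := db.PutMeta("items", "schema_version", []byte("42")); err != nil {
        log.Fatal(err)
    }
    keys, err := db.EnumMeta("items") // list all meta keys of the namespace (assumed signature)
    if err != nil {
        log.Fatal(err)
    }
    for _, key := range keys {
        fmt.Println("meta key:", key)
    }
    // Remove a stale entry (key name is hypothetical; assumed signature).
    if err := db.DeleteMeta("items", "obsolete_key"); err != nil {
        log.Fatal(err)
    }
}
```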
@@ -96,18 +96,18 @@
 # Version 3.23.0 (07.03.2024)

 ## Core
-- [fea] Improved queries condition injection injection on the both directions: joined-to-main and main-to-joined
+- [fea] Improved queries condition injection in both directions: joined-to-main and main-to-joined
 - [fea] Improved heuristic for the joins preselect/condition injection. If query has some conditions with high selectivity, scheduler will try to avoid high cost preselect/injections attempts
 - [fea] Added logic for the intersecting conditions merge on the query preprocessing stage
-- [fea] Added clock wrapper to force [vDSO](https://ru.manpages.org/vdso/7) for the `clock_gettime`-calls. This gives up to 20% overall performance on the some operation systems, which using libstdc++ without vDSO (for example, on Centos7)
+- [fea] Added clock wrapper to force [vDSO](https://ru.manpages.org/vdso/7) for the `clock_gettime`-calls. This gives up to 20% overall performance on some operating systems that use libstdc++ without vDSO (for example, on CentOS 7)
 - [fea] Updated bundled libbactrace to support DWARF-v5 debug info format
 - [fix] Fixed indexes/count cache drop on the indexes config update

 ## Go connector
-- [fea] Optimized handling of the unknow fields in the Go binding (in cases, when some field exists in the database, but not in the go-struct)
+- [fea] Optimized handling of the unknown fields in the Go binding (in cases when some field exists in the database, but not in the go-struct)

 ## Deploy
-- [fea] Updated Redos build/deploy image to v3.3.4
+- [fea] Updated RedOS build/deploy image to v3.3.4

 ## Face
 - [fea] Added Prettify feature to the SQL editor
@@ -134,7 +134,7 @@
 - [fix] Disabled in-memory WAL for the system namespaces

 ## Replication
-- [fix] Improved transactions replication via WAL queries: from now transactions will not be splitted into separate insert/updates on the follower
+- [fix] Improved transactions replication via WAL queries: from now transactions will not be split into separate insert/updates on the follower

 ## Face
 - [fix] Fixed the Check request
@@ -144,7 +144,7 @@
 ## Core
 - [fea] Added `explain` results for the [subqueries](readme.md#subqueries-nested-queries)
 - [fea] Added support for limit/offset in `Delete` and `Update` queries
-- [fea] Optimized ordered indexes' cache logic to achive more cache hits and more compact cache size
+- [fea] Optimized ordered indexes' cache logic to achieve more cache hits and more compact cache size
 - [fea] Added support for `COUNT_CACHED(*)`/`CachedTotal()` aggregation in the queries with INNER JOINS. Now it's possible to cache total count results for such queries
 - [fix] Fixed SQL parsing for combinations of the [subqueries](readme.md#subqueries-nested-queries) and other conditions in the main query
 - [fix] Fixed [select functions](fulltext.md#using-select-functions) with '.' delimiter. Previously those functions actually expected '=' as a delimiter
@@ -178,11 +178,11 @@
 - [fix] Fixed the empty space between the last NS and the Total section on the Memory page
 - [fix] Fixed the title changing on the NS page during a new NS creating
 - [fix] Fixed the tooltip position in the sidebar menu
-- [fix] Fixed the "+" button for the Expire after field
+- [fix] Fixed the "+" button for the Expire after field

 # Version 3.21.0 (15.12.2023)

 ## Core
-- [fea] Added [subqueries](readme.md#subqueries-nested-queries) support (`explain` for subqueries will be implement in the next releases)
+- [fea] Added [subqueries](readme.md#subqueries-nested-queries) support (`explain` for subqueries will be implemented in the next releases)
 - [fea] Added backtraces/minidump support for Windows platform
 - [fea] Added query crash tracker support for Windows platform
 - [fix] Added explicit error for aggregations in joined queries
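To illustrate the `COUNT_CACHED(*)`/`CachedTotal()` entry above — a sketch of a join query with a cached total count, assuming `db` is an open `*reindexer.Reindexer`; the namespaces, fields and join condition are hypothetical:

```go
// Select paid orders joined to their users, caching the total count.
q := db.Query("orders").
    Where("status", reindexer.EQ, "paid").
    CachedTotal() // COUNT_CACHED(*): the total may be served from the count cache
q.InnerJoin(db.Query("users"), "user").
    On("user_id", reindexer.EQ, "id")

it := q.Exec()
defer it.Close()
if err := it.Error(); err != nil {
    log.Fatal(err)
}
fmt.Println("total (cached):", it.TotalCount())
```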
@@ -198,7 +198,7 @@
 ## Go connector
 - [fea] Added Go API and DSL-convertor for subqueries
-- [fea] Changed CJSON-to-object convetrion logic for slices: now the single JSON values and fields with multiple JSON-paths will be concatenated together in the target field
+- [fea] Changed CJSON-to-object conversion logic for slices: now the single JSON values and fields with multiple JSON-paths will be concatenated together in the target field
 - [fea] Added `WithStrictJoinHandlers`. This option allows to validate JoinHandlers usage in runtime
 - [fix] Fixed panic handling in the CJSON deserialization
 - [fix] Fixed logging in `cproto`-binding. Error messages will no longer be redirected to stdout instead of user's logger
@@ -225,24 +225,24 @@
 - [fea] Added crash query report for Update/Delete queries
 - [fea] Added configs for LRU index cache size (check section `cache` in the `namespaces` entity of the `#config`-namespace)
 - [fea] Optimized CJSON deserialization
-- [fea] Optimized WAL size (unchaged indexes and schemas will not be added into the WAL anymore)
+- [fea] Optimized WAL size (unchanged indexes and schemas will not be added into the WAL anymore)
 - [fea] Added atomic rollback for multiple field UPDATE-queries in case of errors during query execution (currently it is atomic on the level of the individual document)
-- [fix] Fixed column indexes optimization (some of the comparators have become noticeably more effective)
+- [fix] Fixed column indexes optimization (some comparators have become noticeably more effective)
 - [fix] Added PK check for UPDATE-queries
 - [fix] Fixed JOINs on composite indexes
 - [fix] Fixed select functions with MERGE queries for cases when the same namespace is merged multiple times
-- [fix] Fixed non-indexed string/int fields convertion on index creation in cases when index type is not equal to the field type
-- [fix] Disabled leveldb's log files (in rare cases those logs could lead to the problems with storage reopenning)
+- [fix] Fixed non-indexed string/int fields conversion on index creation in cases when index type is not equal to the field type
+- [fix] Disabled leveldb's log files (in rare cases those logs could lead to the problems with storage reopening)
 - [fix] Disable background indexes optimization for the temporary namespaces
-- [fix] Removed attempts to reopen storage with flush errors befors it's destruction
-- [fix] Some of the storage flushes were moved outside of the unique namespaces lock
+- [fix] Removed attempts to reopen storage with flush errors before its destruction
+- [fix] Some storage flushes were moved outside the unique namespaces lock
 - [fix] Fixed directories deletion for Windows

 ## Replication
-- [fea] Reduced memory consumptio for online-updates
+- [fea] Reduced memory consumption for online-updates
 - [fix] Fixed updates size calculation during max allowed updates size check (now it checks actual allocated memory, not the data size)
 - [fix] Namespaces config applying after FORCE-sync replication
-- [fix] Fixed some rare tagsmatcher conflicts in replicated namespaces in case when namespace was not replicted previously and had some data in it
+- [fix] Fixed some rare tagsmatcher conflicts in replicated namespaces in case when namespace was not replicated previously and had some data in it
 - [fix] Fixed some unnecessary force syncs after go clients connection
 - [fix] Fixed documents duplication after PK change in UPDATE-query
 - [fix] Fixed logical race in cascade replication resync
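The atomic-rollback and PK-check entries above concern multi-field UPDATE-queries; a minimal sketch of such a query, assuming `db` is an open `*reindexer.Reindexer` (namespace and field names are hypothetical):

```go
// Multi-field update in a single query.
it := db.Query("items").
    Where("id", reindexer.EQ, 42). // UPDATE-queries now get an explicit PK check
    Set("price", 1099).
    Set("in_stock", true). // on execution error the document is rolled back atomically
    Update()
defer it.Close()
if err := it.Error(); err != nil {
    log.Fatal(err)
}
```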
@@ -257,12 +257,12 @@
 ## Go connector
 - [fea] Added support for UNIX domain sockets in cproto-binding (dsn format: `ucproto://:/`, example: `ucproto:///tmp/reindexer.sock:/my_db`)
-- [fix] Fixed deadlock in `EnableLogger` method betweenn Go and C++ mutexes
+- [fix] Fixed deadlock in `EnableLogger` method between Go and C++ mutexes

 ## Face
 - [fea] Added the ability to use hot keys to navigate over the UI
 - [fea] Added the link to the documentation to the left bar menu
-- [fea] Changed the column filter with the case sensitive one
+- [fea] Replaced the column filter with the case-sensitive one
 - [fea] Added validation of the JSON paths field to the Index Config page
 - [fea] Added the wal_size field to the Namespace config
 - [fea] Added the preselect_us and field_type sections to the Explain page
@@ -274,20 +274,20 @@
 - [fix] Fixed the operations with the last column for the Grid view

 ## Build
-- [fea] Added support for almalinux-9 builds in [dependencies.sh](dependencies.sh)
+- [fea] Added support for AlmaLinux 9 builds in [dependencies.sh](dependencies.sh)

 # Version 3.19.0 (16.09.2023)

 ## Core
-- [fea] Added background namespaces deleteion. Previously in some cases atomic transactions could cause the namespace's deletion in the querie's execution thread, which lead to the latency spike
+- [fea] Added background removal of namespaces. Previously in some cases atomic transactions could cause the namespace's deletion in the query's execution thread, which led to a latency spike
 - [fea] Added locks and flush timings for UPDATE and DELETE queries into [slow log](readme.md#slow-actions-logging)
 - [fea] Added support for the [COUNT/COUNT_CACHED](readme.md#aggregations) aggregations for the MERGE-queries
 - [fea] Added basic support for the SUM/MAX/MIN aggregations for the MERGE-queries without LIMIT/OFFSET
-- [fea] Added more information about JOINs condtitions injection into `explain` (check `on_conditions_injections` field in the explain results)
+- [fea] Added more information about JOINs conditions injection into `explain` (check `on_conditions_injections` field in explain results)
 - [fea] Added `field_type` into the `explain`. This fields shows, which kind of field/comparator was used in the query: `indexed` or `non-indexed`
 - [fea] Added support for store (`-`) UUID indexes
-- [fea] Optimized string<->UUID convertions in the C++ library
-- [fix] Fixed `total_us` time calculation in the `explain` (previously `preselect` time was in the explain results, but was not counted in the `total_us` field)
-- [fix] Fixed JOINs conditions injection for the queries with multiple JOINs. Previously this optimization could skip some of the JOIN queries
+- [fea] Optimized string<->UUID conversions in the C++ library
+- [fix] Fixed `total_us` time calculation in the `explain` (previously `preselect` time was in explain results, but was not counted in the `total_us` field)
+- [fix] Fixed JOINs conditions injection for the queries with multiple JOINs. Previously this optimization could skip some JOIN queries
 - [fix] Fixed multiple UPDATE arrays concatenation issues: empty arrays, string array, array with scalar, etc.
 - [fix] Composite indexes over the other composite indexes were explicitly disabled
 - [fix] Fixed check for nested JOINs and MERGEs (nested JOIN/MERGE is not allowed)
@@ -295,7 +295,7 @@
 - [fix] Fixed crash on the CJSON deserialization for array indexes with multiple JSON-paths

 ## Replication
-- [fix] Fixed handling of the updates buffer overflow (previously this logic may lead to multiple extra resyncs)
+- [fix] Fixed handling of the updates buffer overflow (previously this logic could lead to multiple extra re-syncs)

 ## Fulltext
 - [fea] Added experimental support for Armenian, Hebrew, Arabic and Devanagari alphabets (prefixes/postfixes/suffixes search, typos, stop-words and synonyms are supported)
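A sketch of the MERGE-query aggregations described in the 3.19.0 Core entries above (COUNT via `ReqTotal`, SUM over the merged result), assuming `db` is an open `*reindexer.Reindexer`; the namespace names are hypothetical and the aggregation-result printing is kept generic since the exact result struct shape varies between versions:

```go
// Sum of "price" and total count across two merged namespaces.
q := db.Query("orders_2023")
q.Merge(db.Query("orders_2024"))
q.AggregateSum("price") // SUM over the merged result (no LIMIT/OFFSET allowed here)
q.ReqTotal()            // COUNT(*) over the merged result

it := q.Exec()
defer it.Close()
if err := it.Error(); err != nil {
    log.Fatal(err)
}
for _, agg := range it.AggResults() {
    fmt.Printf("%+v\n", agg)
}
fmt.Println("count:", it.TotalCount())
```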
@@ -303,12 +303,12 @@
 ## Reindexer server
 - [fix] Fixed SSE4.2 support check on startup (server has to print error message if SSE support is required for the current build)
 - [fix] Fixed DELETE-queries compatibility between `reindexer_server v3` and `reindexer_tool v4` in cproto mode
-- [fix] Fixed RPC-timeout handling (extra check for unread data wasd added). Previously connect could be dropped in case of the some very long queries
+- [fix] Fixed RPC-timeout handling (extra check for unread data was added). Previously the connection could be dropped in case of some very long queries

 ## Go connector
-- [fea] Optimized string<->UUID convertions in the Go bindings
+- [fea] Optimized string<->UUID conversions in the Go bindings
 - [fea] Default balancing algorithm for `cproto` was changed from RoundRobin to PowerOfTwoChoices. RoundRobin may still be enabled via `WithConnPoolLoadBalancing` option of the binding
-- [fix] Fixed transactions `Commit`/`Rollback` stucking in case of the incomatible items' sctructs in client and DB
+- [fix] Fixed transactions `Commit`/`Rollback` getting stuck in case of incompatible items' structs in client and DB
 - [fix] Fixed `builtinserver` logger initialization in cases when single app run multiple instances of the `builtinserver`
 - [fix] Fixed RPC-connections background pinging (Ping requests became asynchronous)
@@ -330,10 +330,10 @@
 ## Core
 - [fea] Increased max indexes count for each namespace up to 255 user-defined indexes (previously it was 63)
 - [fea] Added more info to the [slow logger's](readme.md#slow-actions-logging) output: mutexes timing for transactions and basic `explain` info for `select`-queries
-- [fea] Improved logic of the cost evaluation for the btree indexes usage in situations, when backgroud indexes ordering was not completed (right after write operations). Expecting more optimal execution plan in those cases
+- [fea] Improved logic of the cost evaluation for the btree indexes usage in situations when background indexes ordering was not completed (right after write operations). Expecting more optimal execution plan in those cases
 - [fix] Changed logic of the `ALLSET` operator. Now `ALLSET` condition returns `false` for empty values sets and the result behavior is similar to MongoDB [$all](https://www.mongodb.com/docs/manual/reference/operator/query/all/)
 - [fix] Fixed automatic conversion for numeric strings with leading or trailing spaces (i.e. ' 1234' or '1234 ') into integers/floats in `WHERE`/`ORDER BY`
-- [fix] Allowed non-unique values in forced sort (`ORDER BY (id,4,2,2,5)`). If forced sort order contains same values on the different positions (i.e. `ORDER BY (id,1,2,1,5)`), then the first occurance of the value will be used for sorting
+- [fix] Allowed non-unique values in forced sort (`ORDER BY (id,4,2,2,5)`). If forced sort order contains the same value at different positions (i.e. `ORDER BY (id,1,2,1,5)`), then the first occurrence of the value will be used for sorting
 - [fix] Added limits for the large values sets in the composite indexes substitution algorithm, introduced in v3.15.0 (due to performance issues in some cases). If the result size of the set is exceeding corresponding limit, reindexer will try to find another composite index or skip the substitution

 ## Go connector
@@ -344,7 +344,7 @@
 - [fea] Enabled SSE4.2 for the default reindexer's builds and for the prebuilt packages. SSE may still be disabled by passing `-DENABLE_SSE=OFF` to `cmake` command

 ## Face
-- [fea] Changed the scale window icon for textareas
+- [fea] Changed the scale window icon for text fields
 - [fea] Added the background color to the Close icon in the search history on the Namespace page
 - [fea] Improved the buttons' behavior on the Query builder page
 - [fea] Added the database name size limit.
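A short sketch of the `ALLSET` and forced-sort behavior described in the Core entries above, assuming `db` is an open `*reindexer.Reindexer`; the `items` namespace and its indexes are hypothetical:

```go
q := db.Query("items").
    // ALLSET: matches only if the "tags" array contains every listed value;
    // per the entry above, an empty value set now yields false ($all semantics).
    Where("tags", reindexer.ALLSET, []string{"new", "sale"}).
    // Forced sort: ids 4, 2 and 5 come first in that order, the rest follow in
    // regular index order; duplicates in the forced list are now tolerated.
    Sort("id", false, 4, 2, 5)

it := q.Exec()
defer it.Close()
```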
@@ -371,7 +371,7 @@ - [fix] Disabled composite indexes over non-indexed fields (except fulltext indexes) and sparse composite indexes. Previously those indexes could be created, but was not actually implemented, so queries with them could lead to unexpected errors - [fix] Fixed merging of the conditions by the same index in queries' optimizer (previously it could sometimes cause SIGABORT in case of the empty resulting set) - [fix] Disabled LIMIT for internal merged queries (it was not implemented and did not work properly on previous versions) -- [fix] Fixed cache behavior for fulltext queires with JOINs and `enable_preselect_before_ft: true` +- [fix] Fixed cache behavior for full-text queries with JOIN and `enable_preselect_before_ft: true` ## Reindexer server - [fea] Optimized perfstats config access @@ -389,7 +389,7 @@ - [fea] Added UUID field type for indexes - [fix] Fixed fulltext areas highlighting for the queries with prefixes/suffixes/postfixes - [fix] Fixed segfault on max unique json names overflowing -- [fix] Dissalowed system namespaces deletion/renaming +- [fix] Disallowed system namespaces deletion/renaming ## Go connector - [fea] Added support for OpenTelemetry traces. [Details](readme.md#tracing) @@ -414,7 +414,7 @@ # Version 3.15.0 (24.04.2023) ## Core -- [fea] Improved typos handling agorithm for `text`-indexes. New options: `max_typo_distance`,`max_symbol_permutation_distance`,`max_missing_letters` and `max_extra_letters` were added +- [fea] Improved typos handling algorithm for `text`-indexes. New options: `max_typo_distance`,`max_symbol_permutation_distance`,`max_missing_letters` and `max_extra_letters` were added - [fea] Improved composite indexes substitution. Now optimizer is able to find composite index' parts on almost any positions in query and choose the largest corresponding composite index if multiple substitutions are available - [fea] Improved sorting by fields from joined-namespaces (for `inner joins`). Added support for string types and improved conversion logic (previously this type of sorting were able to sort only numeric fields) - [fea] Added more options for `snippet_n`: `with_area`, `left_bound` and `right_bound`. [Details](fulltext.md#snippet_n) @@ -424,7 +424,7 @@ - [fea] Added support for non-string map keys for msgpack decoded (positive and negative integer values may be used as tag names in msgpack messages now) ## Go connector -- [fea] Added `OptionConnPoolLoadBalancing`. This options allows to chose load balancing algorithm for cproto connections +- [fea] Added `OptionConnPoolLoadBalancing`. 
This option allows choosing the load balancing algorithm for cproto connections

 ## Face
 - [fea] Automized filling the key field on the Meta creation page
@@ -462,13 +462,13 @@
 ## Reindexer server
 - [fix] Fixed server connections drops after outdated Close() call from RPC-client
 - [fix] Fixed 'SetSchema' RPC-call logging
-- [fix] Enlarged stop attemtps before SIGKILL for reindexer service
+- [fix] Enlarged stop attempts before SIGKILL for reindexer service

 ## Go connector
-- [fix] Fixed client connections drops after some of the queries time outs (CPROTO)
+- [fix] Fixed client connection drops after some query timeouts (CPROTO)

 ## Face
-- [fix] Fixed minor issues with queriesperfstats and explain
+- [fix] Fixed minor issues with queries perfstats and explain
 - [fix] Fixed the visible field set storing for the same SQL queries
 - [fix] Restored the Load more feature to the Connections page
@@ -476,7 +476,7 @@
 ## Core
 - [fea] Reworked fulltext search for multiword phrases in quotes (check description [here](fulltext.md#phrase-search))
 - [fea] Optimized database/server termination
-- [fea] Improved and optimized suffix tree selection for terms from stemmers in fulltext index
+- [fea] Improved and optimized suffix tree selection for terms from stemmers in the full-text index
 - [fea] Optimized select queries with limit/offset and ordered index in conditions, but without explicit sort orders
 - [fea] Changed weights for ordered queries with limit/offset and ordered index in `where` or `order by` sections
 - [fea] Improved performance for large joined namespaces
@@ -495,7 +495,7 @@
 - [fix] Fixed the column settings for the Statistics table
 - [fix] Improved the Statistics UI
 - [fix] Fixed the SQL -> truncate response
-- [fix] Fixed the infinity requests to namespases on the Config page
+- [fix] Fixed the infinite requests to namespaces on the Config page
 - [fix] Fixed the validator of the tag field
 - [fix] Fixed the error on the Explain page
 - [fix] Fixed issues with the Position boost
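To illustrate the reworked phrase search mentioned in the 3.14.0 Core entries above — a minimal Go sketch, assuming `db` is an open `*reindexer.Reindexer` and `description` is a hypothetical fulltext index:

```go
// Quoted phrase: the words must occur adjacently in the indexed text.
it := db.Query("books").
    Where("description", reindexer.EQ, `"dark matter halo"`).
    Exec()
defer it.Close()
```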
@@ -504,26 +504,26 @@
 # Version 3.13.2 (23.02.2023)

 ## Core
-- [fix] Fixed collisions resolving in indexes hash map (worst case scenarious were significantly improved)
-- [fix] Fixed errors handling for custom indexes sort orderes in case of incorrect sort_orders config
+- [fix] Fixed collisions resolving in indexes hash map (worst case scenarios were significantly improved)
+- [fix] Fixed errors handling for custom indexes sort orders in case of incorrect sort_orders config

 ## Go connector
 - [fix] Enlarged default fetch limit for cproto binding

 ## Reindexer tool
 - [fix] Fixed large join requests execution (with 10k+ documents in result)
-- [fix] Fixed 'pretty' output mode (now reindexer_tool will not stuck on formating after request)
+- [fix] Fixed 'pretty' output mode (now reindexer_tool will not get stuck on formatting after request)

 ## Build/Deploy
 - [fea] Switched Fedora's versions for prebuilt packages to Fedora 36/37

 ## Face
 - [fea] Added the bulk deleting items on the list
-- [fea] Chnaged the lifetime of the editing form for the Item adding operation
-- [fea] Replaced the Create new database label to the Choose a database in the main menu
+- [fea] Changed the lifetime of the editing form for the Item adding operation
+- [fea] Replaced the Create a new database label with the Choose a database one in the main menu
 - [fea] Forbade entering cyrillic symbols for DB and NS titles
 - [fea] Added the ability to rollback to the default DB config
-- [fea] Improved the filtering on the Namespace page
+- [fea] Improved the filtering on the Namespace page
 - [fea] Replaced the empty page with the inform message on the Meta page
 - [fea] Disabled the Create NS button if none DB exists
 - [fea] Added the ability to export the SQL result in CSV
@@ -542,7 +542,7 @@
 # Version 3.13.1 (19.01.2023)

 ## Core
-- [fix] Fixed wrong sort order for unbuilt tree-indexes combined with unordered conditions (SET/IN/ALLSET)
+- [fix] Fixed incorrect sort order for unbuilt tree-indexes combined with unordered conditions (SET/IN/ALLSET)
 - [fix] Fixed assertion for sparse indexes with multiple JSON-paths
 - [fix] Fixed assertion for wrong CJSON tags in storage
 - [fix] Fixed double values restrictions for JSON-documents (previously in some cases negative double values could cause JSON parsing exceptions)
@@ -552,7 +552,7 @@
 # Version 3.13.0 (12.01.2023)

 ## Core
-- [fix] Fixed composit indexes fields update after new indexes addition
+- [fix] Fixed composite indexes fields update after new indexes addition
 - [fix] Fixed SEGFAULT on string precepts

 ## Go connector
@@ -574,38 +574,38 @@
 - [fea] Added the data-test attribute
 - [fix] Fixed console errors appeared on hover for the Client cell in the Current Statistics
 - [fix] Fixed the column width resizing on the page reloading
-- [fix] Fixed disapiaring of the Item table part on the Namespace page
+- [fix] Fixed disappearing of the Item table part on the Namespace page

 # Version 3.11.0 (02.12.2022)

 ## Core
-- [fea] Added background TCMalloc's cache sizes managment (this feature was also supported for Go-builtin, Go-builtinserver and standalone modes). Check `allocator-cache-*` section in server's config
+- [fea] Added background TCMalloc's cache sizes management (this feature was also supported for Go-builtin, Go-built-in server and standalone modes). 
Check `allocator-cache-*` section in server's config - [fix] Fixed handling of the `no space left` errors for asynchronous storage - [fix] Fixed and improved composite indexes substitution optimization in select-queries -- [fix] Fixed SEGFAULT on large merge limit values in fulltest indexes +- [fix] Fixed SEGFAULT on large merge limit values in fulltext indexes ## Reindexer server -- [fea] Added more info about the storages' states into prometheus metrcis +- [fea] Added more info about the storages' states into prometheus metrics - [fix] Fixed precepts parsing in HTTP API ## Face -- [fea] Replaced the Scrolly component with the Vue-scroll one +- [fea] Replaced the Scroll component with the Vue-scroll one - [fea] Added strings_waiting_to_be_deleted_size to Statistics -> Memory for NC - [fea] Added tooltips to the action bar on the Namespace page - [fea] Added a default value for Rtree type -- [fea] Added tooltips to the sidbar buttons +- [fea] Added tooltips to the sidebar buttons - [fea] Made visible the default options of namespaces - [fea] Changed the Disabled view of the Resent button on the SQL page - [fix] Fixed sending NULL value for max_preselect_part - [fix] Removed the search bar from the Indexes page - [fix] Fixed the title of the index editor -- [fix] Removed oldated libraries usage +- [fix] Removed outdated libraries usage - [fix] Fixed the Expand/Collapse actions for lists - [fix] Fixed the Collapse all button on the Namespace page - [fix] Fixed the Statistics menu pointer # Version 3.10.1 (17.11.2022) ## Go connector -- [fea] Add go.mod file with dependecies versions +- [fea] Add go.mod file with dependencies versions - [ref] Cproto binding now requires explicit import of the `_ "github.com/restream/reindexer/bindings/cproto"`-module ## Repo @@ -616,7 +616,7 @@ - [fea] Improve conditions injection from joined queries into main query. This optimization now covers much wider class of queries - [fea] Add parenthesis expansion for queries' conditions - [fea] Switch yaml parser to more functional one -- [fix] Fix priority of `NOT` operations in queries (now it always has highest priority among all the others logical operations) +- [fix] Fix priority of `NOT` operations in queries (now it always has the highest priority among all the others logical operations) - [fix] Fix race condition in indexes optimization logic ## Reindexer server @@ -633,7 +633,7 @@ - [fea] Add the ability to pin Namespaces in the Namespace list - [fea] Redesign the feature of the column resizing - [fix] Fix UI availability with DB config issues -- [fix] Fix the issue with Scrolly on the Statistics page +- [fix] Fix the issue with Scroll on the Statistics page # Version 3.9.1 (12.10.2022) ## Core @@ -642,7 +642,7 @@ # Version 3.9.0 (06.10.2022) ## Core - [fea] Add `enable_preselect_before_ft` option for fulltext indexes. 
It allows to prioritize non-fultext indexes in query -- [fix] Fix runtime index convertations (from `sparse` and to `sparse`) +- [fix] Fix runtime index conversions (from `sparse` and to `sparse`) - [fix] Fix fulltext index memstats race - [fix] Fix query results overflow for large joined requests (in cases, when joined items count is more than 10 billions) @@ -655,7 +655,7 @@ - [fea] Improve snackbars view - [fea] Add the pagination instead of the 'load more' feature - [fea] Improve the Precepts UI -- [fea] Add a message about ofline status on 500 server response +- [fea] Add a message about offline status on 500 server response - [fea] Add the description of the 5хх codes to the message body - [fix] Fix a misprint in the database config - [fix] Fix the value array clearing @@ -665,7 +665,7 @@ - [fea] Optimize selects with large id sets - [fea] Optimize max index structures resize time on documents insertion/deletion/modification - [fea] Add max highlighted areas setting for fulltext indexes (`max_areas_in_doc` and `max_total_areas_to_cache`) -- [fix] Add some of the missing memory into #memstats +- [fix] Add some missing memory into #memstats ## Reindexer server - [fea] Reduce allocations count for query result responses @@ -728,7 +728,7 @@ - [fea] Add the Loader icon for heavy requests - [fea] Change the full-text config - [fea] Add the logging of the error messages -- [fea] Add the action disabling for the Add button in the JSON editor when an syntax error is detected +- [fea] Add the action disabling for the Add button in the JSON editor when a syntax error is detected - [fix] Fix filters on the Query builder page - [fix] Fix the pagination issue on the SQL page - [fix] Fix the Namespace settings button position during the browser window resizing @@ -749,14 +749,14 @@ # Version 3.5.0 (14.04.2022) ## Core -- [fea] Optimize composite fulltext indexes rebuilding. Composite ft index will not be rebuild after update, if non of the index's parts were actually changed -- [fea] Add current query to backtrace for builtin/builtinserver modes +- [fea] Optimize composite fulltext indexes rebuilding. 
Composite ft index will not be rebuild after update, if no part of the index has actually been modified +- [fea] Add current query to backtrace for built-in/built-in server modes - [fix] Fix string copying for array indexes - [fix] Fix ALLSET keyword parsing for SQL ## Face - [fea] Improve `explain` representation for queries with joins -- [fea] Add 'copy' button for fro json preview in Query Builder +- [fea] Add 'copy' button for json preview in Query Builder - [fix] Fix floating point numbers' behavior in numeric inputs in 'config' tab - [fix] Fix 'x' button in 'meta' tab - [fix] Fix type in 'partitial_match_decrease' field in index config @@ -857,7 +857,7 @@ - [fix] Fixed indexes for client stats - [fix] Fixed optimization cancelling for concurrent queries - [fix] Do not rebuild composite indexes after update -- [fix] Removed TSAN suppressions for tests +- [fix] Removed TSAN suppression for tests ## Face - [fea] Added tooltips to Grid columns @@ -870,7 +870,7 @@ - [fix] Fixed the issue with losing a namespace focus during tabs changing - [fix] Performed yarn upgrade - [fix] Fixed the issue with the sorting params keeping -- [fix] Fixed the issue with case sensitive field names during the grid building +- [fix] Fixed the issue with case-sensitive field names during the grid building - [fix] Fixed the issue with slow 3g in the Namespace list - [fix] Fixed the "Default stop words" option on the "Add index" form - [fix] Fixed the issue with the full-text config and full-text synonyms definition config areas on the "Add index" form @@ -882,7 +882,7 @@ # Version 3.2.2 (16.07.2021) ## Core - [fea] Optimize string refs counting for wide-range queries -- [fix] Fix merge limit handling for deleted values in fultext index +- [fix] Fix merge limit handling for deleted values in fulltext index - [fix] Fix cascade replication for nodes without storage - [fix] Fix sorted indexes update @@ -947,7 +947,7 @@ ## Core - [fix] Fixed segfault in fulltext query with brackets -- [fix] Fixed deadlock in selecter in case of concurrent namespace removing +- [fix] Fixed deadlock in selector in case of concurrent namespace removing - [fix] Fixed true/false tokens parsing inside query to composite index ## Reindexer server @@ -979,11 +979,11 @@ # Version 3.1.1 (29.03.2021) ## Core - [fix] Bug in full text query with single mandatory word fixed -- [fix] Bug in query with condition ALLSET by nonindexed field fixed +- [fix] Bug in query with condition ALLSET by non indexed field fixed - [fix] Bug in query with merge and join by the same namespace fixed - [fix] Simultaneous update of field and whole object fixed - [fix] Build on aarch64 architecture fixed -- [fix] Fixed replication updates limit trackig, and possible inifity full namespace sync +- [fix] Fixed replication updates limit tracking, and possible infinity full namespace sync - [fix] Fixed web face page corruption on Windows builds ## Reindexer server @@ -1004,7 +1004,7 @@ - [fix] Mandatory terms with multiword synonyms in fulltext queries fixed - [fea] Verification of EQUAL_POSITION by the same field added - [fea] Added new syntax for update of array's elements -- [fea] Impoved verification of fulltext index configuration +- [fea] Improved verification of fulltext index configuration ## Reindexer server - [fea] api/v1/check returns more information @@ -1038,7 +1038,7 @@ - [fix] Build builtin/builtinserver on mingw ## Face -- [fea] Added tooltips to longest query +- [fea] Added tooltips to the longest query - [fix] Fixed the query view on the Query -> SQL page 
- [fix] Added checking for unsaved data during the window closing - [fix] Bug with the pagination in the List mode @@ -1063,7 +1063,7 @@ # Version 3.0.1 (31.12.2020) ## Core -- [fix] Search by multi word synonyms is fixed +- [fix] Search by multi-word synonyms is fixed - [fix] Comparator performance issue of condition IN (many strings) ## Face @@ -1111,7 +1111,7 @@ - [fix] Fix outdated namespace removing from prometheus stats ## Reindexer tool -- [fix] Fix command execution iterrupts on SIGINT +- [fix] Fix command execution interrupts on SIGINT - [fix] Disable replicator for reindexer_tool ## Go connector @@ -1124,8 +1124,8 @@ - [fea] Added extra parameter to clients stats - [fea] Added update, delete, truncate statement in DSL - [fix] Added support for equal_positions in sql suggester -- [fix] Crash on distinct whith composite index -- [fix] Crash on query whith incorrect index type after index conversion +- [fix] Crash on distinct with composite index +- [fix] Crash on query with incorrect index type after index conversion ## Reindexer tool - [fix] Crash on upsert array object as first json tag @@ -1297,7 +1297,7 @@ - [fea] Improved behavior while input is redirected # Go connector -- [fix] Enable to create multiple instances of builtinserver +- [fix] Enable to create multiple instances of built-in server - [fea] Multiple dsn support in cproto # Face @@ -1381,7 +1381,7 @@ - [fix] Select fields filter fix for right namespace # Reindexer server -- [fea] web static resources are embeded to server binary by default +- [fea] web static resources are embedded to server binary by default # Version 2.5.5 (07.02.2020) @@ -1440,7 +1440,7 @@ - [fix] Fix assert in sort by composite indexes - [fea] Add composite values parsing for SQL select - [fix] Make circular accumulator for stddev performance statistic -- [fix] Fix unhandled exception while caclulating perf stat +- [fix] Fix unhandled exception while calculating perf stat ## go connector - [fix] RawBuffer leak due to unclosed iterators in transactions @@ -1473,7 +1473,7 @@ ## Core - [fea] Sort by expressions -- [fea] Optimized lock time for joins with small preresult set +- [fea] Optimized lock time for joins with small pre-result set - [fea] Added more info about replication state to #memstat namespace - [fix] LSN on row-based query replication (possible assert on server startup) - [fix] Replication clusterID for namespaces without storage @@ -1511,13 +1511,13 @@ - [fea] Cancelling queries execution by Ctrl+C ## go connector -- [fea] Iterator.NextObj() unmarshals data to any user provided struct +- [fea] Iterator.NextObj() unmarshal data to any user provided struct # Version 2.3.2 (25.10.2019) # Core - [fix] wrong WAL ring buffer size calculation on load from storage -- [fix] Make storage autorepair optional +- [fix] Make storage auto-repair optional - [fix] firstSortIndex assert on sort by hash indexes # Version 2.3.0 (11.10.2019) @@ -1554,7 +1554,7 @@ - [fix] Idset cache invalidation on upsert/delete null values to indexes - [fix] Possible crash if sort orders disabled -- [fix] Wrong lowercasing field name on SQL UPDATE query +- [fix] Wrong lowercase field name on SQL UPDATE query - [fea] Delete & Update queries in transactions ## Reindexer tool @@ -1573,7 +1573,7 @@ - [fix] Fulltext queries sort by another field - [fea] Number of background threads for sort optimization can be changed from #config namespace -- [fix] Sort optimization choose logic is improoved +- [fix] Sort optimization choose logic is improved ## go connector @@ -1585,14 
+1585,14 @@ ## Core -- [fea] More effective usage of btree index for GT/LT and sort in concurent read write operations +- [fea] More effective usage of btree index for GT/LT and sort in concurrent read write operations - [fix] Potential crash on index update or deletion - [fea] Timeout of background indexes optimization can be changed from #config namespace ## Reindexer server - [fea] User list moved from users.json to users.yml -- [fea] Hash is used insead of plain password in users.yml file +- [fea] Hash is used instead of plain password in users.yml file - [fix] Pass operation timeout from cproto client to core # Version 2.2.1 (07.09.2019) @@ -1601,7 +1601,7 @@ - [fea] Updated behaviour of Or InnerJoin statement - [fea] Store backups of system records in storage -- [fix] Replicator can start before db initalization completed +- [fix] Replicator can start before db initialization completed - [fix] Search prefixes if enabled only postfixes ## Reindexer server @@ -1621,7 +1621,7 @@ - [fea] Facets by array fields - [fea] JOIN now can be used in expression with another query conditions - [fea] Support rocksdb as storage engine -- [fix] Race on concurent read from system namespaces +- [fix] Race on concurrent read from system namespaces - [fix] Replication config sync fixed ## Reindexer tool @@ -1643,33 +1643,33 @@ ## Core -- [fea] Added two way sync of replication config and namespace +- [fea] Added two-way sync of replication config and namespace - [fea] Memory usage of indexes decreased (tsl::sparesmap has been added) - [fea] Added non-normalized query in queries stats - [fea] Add truncate namespace function -- [fix] Fixed unexpected hang and huge memory alloc on select by uncommited indexes +- [fix] Fixed unexpected hang and huge memory alloc on select by uncommitted indexes - [fix] Correct usage of '*' entry as default in namespaces config -- [fix] Memory statistics calculation are improoved +- [fix] Memory statistics calculation are improved - [fix] Slave will not try to clear expired by ttl records # Version 2.1.2 (04.08.2019) ## Core -- [fea] Added requests execution timeouts and cancelation contexts +- [fea] Added requests execution timeouts and cancellation contexts - [fea] Join memory consumption optimization - [fea] Current database activity statistics - [fea] Use composite indexes for IN condition to index's fields -- [fea] Reset perfomance and queries statistics by write to corresponding namespace +- [fea] Reset performance and queries statistics by write to corresponding namespace - [fix] Crashes on index removal - [fix] Do not lock namespace on tx operations -- [fix] SQL dumper will not add exceeded bracets +- [fix] SQL dumper will not add exceeded brackets - [fea] Added `updated_at` field to namespace attributes # go connector -- [fea] Added requests execution timeouts and cancelation contexts +- [fea] Added requests execution timeouts and cancellation contexts - [fea] Added async tx support - [fea] Removed (moved to core) `updated_at` legacy code @@ -1696,10 +1696,10 @@ ## Core -- [fea] Bracets in DSL & SQL queries +- [fea] Brackets in DSL & SQL queries - [fix] Crash on LRUCache fast invalidation - [fix] Relaxed JSON validation. 
Symbols with codes < 0x20 now are valid -- [fix] '\0' symbol in JSON will not broke parser +- [fix] The '\0' character in JSON will not break the parser - [fea] Backtrace with line numbers for debug builds - [fix] Replication fixes - [fea] Support for jemalloc pprof features @@ -1723,22 +1723,22 @@ # Version 2.0.3 (04.04.2019) ## Core -- [fea] Facets API improoved. Multiply fields and SORT features +- [fea] Facets API improved. Multiply fields and SORT features - [fea] TTL added - [fea] `LIKE` condition added - [fea] Add expressions support in SQL `UPDATE` statement - [fix] Invalid JSON generation with empty object name -- [fix] Unneccessary updating of tagsmatcher on transactions +- [fix] Unnecessary updating of tagsmatcher on transactions - [fix] LRUCache invalidation crash fix # Reindexer server -- [fea] Added metadata maniplulation methods +- [fea] Added metadata manipulation methods ## Face -- [fea] Added metadata maniplulation GUI -- [fix] Performance statistics GUI improovements +- [fea] Added metadata manipulation GUI +- [fix] Performance statistics GUI improvements # Version 2.0.2 (08.03.2019) @@ -1746,13 +1746,13 @@ - [fea] Update fields of documents, with SQL `UPDATE` statement support - [fea] Add SQL query suggestions - [fea] Add `DISTINCT` support to SQL query -- [fea] Queries to non nullable indexes with NULL condition will return error +- [fea] Queries to non-nullable indexes with NULL condition will return error - [fix] Fixes of full text search, raised on incremental index build - [fix] Queries with forced sort order can return wrong sequences - [fix] RPC client&replicator multithread races - [fix] DISTINCT condition to store indexes - [fix] Caches crash on too fast data invalidation -- [fix] Disable execiton of delete query from namespace in slave mode +- [fix] Disable execution of delete query from namespace in slave mode - [fix] Rebuild fulltext index if configuration changed - [fix] Fixed handling SQL numeric conditions values with extra leading 0 @@ -1799,7 +1799,7 @@ ## Go connector -- [fix] Struct verificator incorrect validation of composite `reindex` tags +- [fix] Struct verifier incorrect validation of composite `reindex` tags - [fea] Pool usage statistics added to `DB.Status()` method ## Reindexer server @@ -1810,7 +1810,7 @@ - [fea] Query builder added - [fea] `Delete all` button added to items page -- [fea] Aggregations results view +- [fea] Aggregation results view - [fea] Edit/Delete function of query results added - [fea] JSON index configuration editor - [fea] Memory usage statistics round precision @@ -1827,7 +1827,7 @@ - [fix] Invalid http redirects, if compiled with -DLINK_RESOURCES ## Reindexer tool -- [fix] Unhandled exception in case trying of create output file in unexisting directory +- [fix] Unhandled exception in case trying of create output file in non-existing directory - [fix] RPC client optimizations and races fixes - [fea] \bench command added @@ -1836,7 +1836,7 @@ ## Core -- [fea] Indexes rebuilding now is non blocking background task, concurrent R-W queries performance increased +- [fea] Indexes rebuilding now is non-blocking background task, concurrent R-W queries performance increased - [fix] Fulltext index incremental rebuild memory grow fixed ## Reindexer server @@ -1861,12 +1861,12 @@ ## Reindexer server -- [fea] REST API documentation improoved +- [fea] REST API documentation improved - [fea] Optimized performance ## Reindexer tool -- [fea] Operation speed is improoved +- [fea] Operation speed is improved # Version 1.10.0 
(29.10.2018) @@ -1887,7 +1887,7 @@ ## Go connector - [fea] reindexer.Status method added, to check connector status after initialization -- [fea] OpenNamespace now register namespace <-> struct mapping without server connection requiriment +- [fea] OpenNamespace now register namespace <-> struct mapping without server connection requirement - [fix] int type is now converted to int32/int64 depends on architecture ## Python connector @@ -1896,7 +1896,7 @@ ## Reindexer server -- [fea] Added fields filter to method GET /api/v1/:db/:namespace:/items mathed +- [fea] Added fields filter to method GET /api/v1/:db/:namespace:/items matched - [fea] Added method DELETE /api/v1/:db/query - [fea] Added poll loop backend (osx,bsd) - [ref] `json_path` renamed to `json_paths`, and now array @@ -1913,7 +1913,7 @@ ## Core - [fea] Storing index configuration in storage -- [fea] Concurent R-W queries performance optimization +- [fea] Concurrent R-W queries performance optimization - [fea] Added indexes runtime performance statistics - [fix] Incorrect NOT behaviour on queries with only comparators - [fix] Race condition on shutdown @@ -1926,31 +1926,31 @@ - [fix] Multiple database support in `embeded` mode. ## Reindexer tool -- [fix] Fixed restoring namespaces with index names non equal to json paths +- [fix] Fixed restoring namespaces with index names non-equal to json paths # Version 1.9.6 (03.09.2018) ## Core - [fea] Merge with Join queries support - [fea] Sort by multiple columns/indexes -- [fix] Case insensivity for index/namespaces names +- [fix] Case insensitivity for index/namespaces names - [fix] Sparse indexes behavior fixed -- [fix] Full text index - correct calculatuon of distance between words -- [fix] Race condition on concurent ConfigureIndex requests +- [fix] Full text index - correct calculation of distance between words +- [fix] Race condition on concurrent ConfigureIndex requests ## Reindexer server - [fea] Added modify index method ## Go connector -- [fea] New builtinserver binding: builtin mode for go application + bundled server for external clients -- [fea] Improoved validation of go struct `reindex` tags +- [fea] New built-in server binding: builtin mode for go application + bundled server for external clients +- [fea] Improved validation of go struct `reindex` tags # Version 1.9.5 (04.08.2018) ## Core - [fea] Sparse indexes -- [fix] Fixed errors on conditions to unindexed fields +- [fix] Fixed errors in conditions for non-indexed fields - [fix] Fulltext terms relevancy, then query contains 2 terms included to single word - [fea] Customizable symbols set of "words" symbols for fulltext - [fix] Incorrect behavior on addition index with duplicated json path of another index @@ -1968,7 +1968,7 @@ ## Face -- [fix] Incorrect urlencode for document update API url +- [fix] Incorrect urlencoded for document update API url - [fix] Namespace view layout updated, jsonPath added to table ## Go connector @@ -1982,12 +1982,12 @@ - [fea] Conditions to any fields, even not indexed - [fea] cproto network client added -- [fix] Query execution plan optimizator fixes. +- [fix] Query execution plan optimizator fixes. ## Reindexer tool - [fea] Command line editor. 
tool has been mostly rewritten at all -- [fea] Interopertion with standalone server +- [fea] Interoperation with standalone server ## Reindexer server @@ -2004,7 +2004,7 @@ ## Core -- [fea] Added system namespaces #memstats #profstats #queriesstats #namespaces with executuin and profiling statistics +- [fea] Added system namespaces #memstats #profstats #queriesstats #namespaces with execution and profiling statistics - [fea] Added system namespace #config with runtime profiling configuration - [fix] Join cache memory limitation - [fix] Fixed bug with cjson parsing in nested objects on delete @@ -2016,7 +2016,7 @@ ## Reindexer Server - [fea] Load data in multiple threads on startup -- [fea] Auto rebalance connection between worker threads +- [fea] Auto re-balance connection between worker threads - [fix] "Authorization" http header case insensitivity lookup - [fix] Unexpected exit on SIGPIPE - [fix] Namespaces names are now url decoded @@ -2122,7 +2122,7 @@ ## C++ core -- [fea] Support join, marge, aggregations in json DSL & SQL queris +- [fea] Support join, marge, aggregations in json DSL & SQL queries - [fea] Added multiline form and comments in SQL query - [fix] Last symbol of documents was not used by fulltext indexer - [fix] Potential data corruption after removing index @@ -2158,7 +2158,7 @@ ## Reindexer server beta released: - [fea] Added cmake package target for RPM & DEB based systems -- [fea] sysv5 initscript added +- [fea] sysv5 init script added - [fea] Binary cproto RPC protocol introduced - [fea] Graceful server shutdown on SIGTERM and SIGINT - [fea] Multiply databases support was implemented diff --git a/clang-tidy/.clang-tidy b/clang-tidy/.clang-tidy deleted file mode 100644 index 23ccde514..000000000 --- a/clang-tidy/.clang-tidy +++ /dev/null @@ -1,30 +0,0 @@ -Checks: 'clang-diagnostic-*, - clang-analyzer-*, - performance-*, - bugprone-*, - -bugprone-exception-escape, - -bugprone-branch-clone, - -bugprone-easily-swappable-parameters, - -bugprone-macro-parentheses, - -bugprone-signed-char-misuse, - -bugprone-narrowing-conversions, - -bugprone-reserved-identifier, - -bugprone-implicit-widening-of-multiplication-result, - -bugprone-assignment-in-if-condition, - -bugprone-parent-virtual-call, - -bugprone-integer-division, - -bugprone-unhandled-self-assignment - -clang-analyzer-security.insecureAPI.DeprecatedOrUnsafeBufferHandling, - -performance-no-int-to-ptr, - -performance-avoid-endl' -# clang-analyzer-security.insecureAPI.DeprecatedOrUnsafeBufferHandling - too many unnecessary warning in vendored code -# performance-no-int-to-ptr - consider how to fix this -# bugprone-macro-parentheses - consider fixing -WarningsAsErrors: '*' -HeaderFilterRegex: '.*(?= 4.0.0 are given under - # the top level key 'Diagnostics' in the output yaml files - mergekey = "Diagnostics" - merged=[] - for replacefile in glob.iglob(os.path.join(tmpdir, '*.yaml')): - content = yaml.safe_load(open(replacefile, 'r')) - if not content: - continue # Skip empty files. - merged.extend(content.get(mergekey, [])) - - if merged: - # MainSourceFile: The key is required by the definition inside - # include/clang/Tooling/ReplacementsYaml.h, but the value - # is actually never used inside clang-apply-replacements, - # so we set it to '' here. 
- output = {'MainSourceFile': '', mergekey: merged} - with open(mergefile, 'w') as out: - yaml.safe_dump(output, out) - else: - # Empty the file: - open(mergefile, 'w').close() - - -def find_binary(arg, name, build_path): - """Get the path for a binary or exit""" - if arg: - if shutil.which(arg): - return arg - else: - raise SystemExit( - "error: passed binary '{}' was not found or is not executable" - .format(arg)) - - built_path = os.path.join(build_path, "bin", name) - binary = shutil.which(name) or shutil.which(built_path) - if binary: - return binary - else: - raise SystemExit( - "error: failed to find {} in $PATH or at {}" - .format(name, built_path)) - - -def apply_fixes(args, clang_apply_replacements_binary, tmpdir): - """Calls clang-apply-fixes on a given directory.""" - invocation = [clang_apply_replacements_binary] - invocation.append('-ignore-insert-conflict') - if args.format: - invocation.append('-format') - if args.style: - invocation.append('-style=' + args.style) - invocation.append(tmpdir) - subprocess.call(invocation) - - -def run_tidy(args, clang_tidy_binary, tmpdir, build_path, queue, lock, - failed_files): - """Takes filenames out of queue and runs clang-tidy on them.""" - while True: - name = queue.get() - invocation = get_tidy_invocation(name, clang_tidy_binary, args.checks, - tmpdir, build_path, args.header_filter, - args.allow_enabling_alpha_checkers, - args.extra_arg, args.extra_arg_before, - args.quiet, args.config_file, args.config, - args.line_filter, args.use_color, - args.plugins) - - proc = subprocess.Popen(invocation, stdout=subprocess.PIPE, stderr=subprocess.PIPE) - output, err = proc.communicate() - if proc.returncode != 0: - if proc.returncode < 0: - msg = "%s: terminated by signal %d\n" % (name, -proc.returncode) - err += msg.encode('utf-8') - failed_files.append(name) - with lock: - sys.stdout.write(' '.join(invocation) + '\n' + output.decode('utf-8')) - if len(err) > 0: - sys.stdout.flush() - sys.stderr.write(err.decode('utf-8')) - queue.task_done() - - -def main(): - parser = argparse.ArgumentParser(description='Runs clang-tidy over all files ' - 'in a compilation database. Requires ' - 'clang-tidy and clang-apply-replacements in ' - '$PATH or in your build directory.') - parser.add_argument('-allow-enabling-alpha-checkers', - action='store_true', help='allow alpha checkers from ' - 'clang-analyzer.') - parser.add_argument('-clang-tidy-binary', metavar='PATH', - default='clang-tidy-17', - help='path to clang-tidy binary') - parser.add_argument('-clang-apply-replacements-binary', metavar='PATH', - default='clang-apply-replacements-17', - help='path to clang-apply-replacements binary') - parser.add_argument('-checks', default=None, - help='checks filter, when not specified, use clang-tidy ' - 'default') - config_group = parser.add_mutually_exclusive_group() - config_group.add_argument('-config', default=None, - help='Specifies a configuration in YAML/JSON format: ' - ' -config="{Checks: \'*\', ' - ' CheckOptions: {x: y}}" ' - 'When the value is empty, clang-tidy will ' - 'attempt to find a file named .clang-tidy for ' - 'each source file in its parent directories.') - config_group.add_argument('-config-file', default=None, - help='Specify the path of .clang-tidy or custom config ' - 'file: e.g. -config-file=/some/path/myTidyConfigFile. ' - 'This option internally works exactly the same way as ' - '-config option after reading specified config file. 
' - 'Use either -config-file or -config, not both.') - parser.add_argument('-header-filter', default=None, - help='regular expression matching the names of the ' - 'headers to output diagnostics from. Diagnostics from ' - 'the main file of each translation unit are always ' - 'displayed.') - parser.add_argument('-line-filter', default=None, - help='List of files with line ranges to filter the' - 'warnings.') - if yaml: - parser.add_argument('-export-fixes', metavar='filename', dest='export_fixes', - help='Create a yaml file to store suggested fixes in, ' - 'which can be applied with clang-apply-replacements.') - parser.add_argument('-j', type=int, default=0, - help='number of tidy instances to be run in parallel.') - parser.add_argument('files', nargs='*', default=['.*'], - help='files to be processed (regex on path)') - parser.add_argument('-fix', action='store_true', help='apply fix-its') - parser.add_argument('-format', action='store_true', help='Reformat code ' - 'after applying fixes') - parser.add_argument('-style', default='file', help='The style of reformat ' - 'code after applying fixes') - parser.add_argument('-use-color', type=strtobool, nargs='?', const=True, - help='Use colors in diagnostics, overriding clang-tidy\'s' - ' default behavior. This option overrides the \'UseColor' - '\' option in .clang-tidy file, if any.') - parser.add_argument('-p', dest='build_path', - help='Path used to read a compile command database.') - parser.add_argument('-extra-arg', dest='extra_arg', - action='append', default=[], - help='Additional argument to append to the compiler ' - 'command line.') - parser.add_argument('-extra-arg-before', dest='extra_arg_before', - action='append', default=[], - help='Additional argument to prepend to the compiler ' - 'command line.') - parser.add_argument('-ignore', default=DEFAULT_CLANG_TIDY_IGNORE, - help='File path to clang-tidy-ignore') - parser.add_argument('-quiet', action='store_true', - help='Run clang-tidy in quiet mode') - parser.add_argument('-load', dest='plugins', - action='append', default=[], - help='Load the specified plugin in clang-tidy.') - args = parser.parse_args() - - db_path = 'compile_commands.json' - - if args.build_path is not None: - build_path = args.build_path - else: - # Find our database - build_path = find_compilation_database(db_path) - - clang_tidy_binary = find_binary(args.clang_tidy_binary, "clang-tidy", - build_path) - - tmpdir = None - if args.fix or (yaml and args.export_fixes): - clang_apply_replacements_binary = find_binary( - args.clang_apply_replacements_binary, "clang-apply-replacements", - build_path) - tmpdir = tempfile.mkdtemp() - - try: - invocation = get_tidy_invocation("", clang_tidy_binary, args.checks, - None, build_path, args.header_filter, - args.allow_enabling_alpha_checkers, - args.extra_arg, args.extra_arg_before, - args.quiet, args.config_file, args.config, - args.line_filter, args.use_color, - args.plugins) - invocation.append('-list-checks') - invocation.append('-') - if args.quiet: - # Even with -quiet we still want to check if we can call clang-tidy. - with open(os.devnull, 'w') as dev_null: - subprocess.check_call(invocation, stdout=dev_null) - else: - subprocess.check_call(invocation) - except: - print("Unable to run clang-tidy.", file=sys.stderr) - sys.exit(1) - - # Load the database and extract all files. 
- database = json.load(open(os.path.join(build_path, db_path))) - files = set([make_absolute(entry['file'], entry['directory']) - for entry in database]) - files, excluded = filter_files(args.ignore, files) - if excluded: - print("Excluding the following files:\n" + "\n".join(excluded) + "\n") - - max_task = args.j - if max_task == 0: - max_task = multiprocessing.cpu_count() - - # Build up a big regexy filter from all command line arguments. - file_name_re = re.compile('|'.join(args.files)) - - return_code = 0 - try: - # Spin up a bunch of tidy-launching threads. - task_queue = queue.Queue(max_task) - # List of files with a non-zero return code. - failed_files = [] - lock = threading.Lock() - for _ in range(max_task): - t = threading.Thread(target=run_tidy, - args=(args, clang_tidy_binary, tmpdir, build_path, - task_queue, lock, failed_files)) - t.daemon = True - t.start() - - # Fill the queue with files. - for name in files: - if file_name_re.search(name): - task_queue.put(name) - - # Wait for all threads to be done. - task_queue.join() - if len(failed_files): - return_code = 1 - - except KeyboardInterrupt: - # This is a sad hack. Unfortunately subprocess goes - # bonkers with ctrl-c and we start forking merrily. - print('\nCtrl-C detected, goodbye.') - if tmpdir: - shutil.rmtree(tmpdir) - os.kill(0, 9) - - if yaml and args.export_fixes: - print('Writing fixes to ' + args.export_fixes + ' ...') - try: - merge_replacement_files(tmpdir, args.export_fixes) - except: - print('Error exporting fixes.\n', file=sys.stderr) - traceback.print_exc() - return_code=1 - - if args.fix: - print('Applying fixes ...') - try: - apply_fixes(args, clang_apply_replacements_binary, tmpdir) - except: - print('Error applying fixes.\n', file=sys.stderr) - traceback.print_exc() - return_code = 1 - - if tmpdir: - shutil.rmtree(tmpdir) - sys.exit(return_code) - - -if __name__ == '__main__': - main() diff --git a/cpp_src/CMakeLists.txt b/cpp_src/CMakeLists.txt index f96ea5f1c..49ca23dd0 100644 --- a/cpp_src/CMakeLists.txt +++ b/cpp_src/CMakeLists.txt @@ -1,13 +1,13 @@ cmake_minimum_required(VERSION 3.10) # Configure cmake options -if (MSVC) +if(MSVC) # Enable C++20 for windows build to be able to use designated initializers. # GCC/Clang support them even with C++17. 
set(CMAKE_CXX_STANDARD 20) -else () +else() set(CMAKE_CXX_STANDARD 17) -endif () +endif() set(CMAKE_CXX_STANDARD_REQUIRED ON) include(CMakeToolsHelpers OPTIONAL) include(ExternalProject) @@ -15,117 +15,122 @@ include(ProcessorCount) set(CMAKE_DISABLE_IN_SOURCE_BUILD ON) -option (WITH_ASAN "Enable AddressSanitized build" OFF) -option (WITH_TSAN "Enable ThreadSanitized build" OFF) -option (WITH_GCOV "Enable instrumented code coverage build" OFF) -option (WITH_STDLIB_DEBUG "Enable compiler's debug flags for stdlib behaviour validation (gcc/clang)" OFF) +option(WITH_ASAN "Enable AddressSanitized build" OFF) +option(WITH_TSAN "Enable ThreadSanitized build" OFF) +option(WITH_GCOV "Enable instrumented code coverage build" OFF) +option(WITH_STDLIB_DEBUG "Enable compiler's debug flags for stdlib behaviour validation (gcc/clang)" OFF) +option(WITH_LTO "Enable LTO (Release/RelWithDebInfo build only)" OFF) -if (WIN32) - option (WITH_CPPTRACE "Enable CppTrace" ON) +if(WIN32) + option(WITH_CPPTRACE "Enable CppTrace" ON) endif() -option (ENABLE_LIBUNWIND "Enable libunwind" ON) -option (ENABLE_TCMALLOC "Enable tcmalloc extensions" ON) -option (ENABLE_JEMALLOC "Enable jemalloc extensions" ON) -option (ENABLE_ROCKSDB "Enable rocksdb storage" ON) -option (ENABLE_GRPC "Enable GRPC service" OFF) -option (ENABLE_SSE "Enable SSE instructions" ON) -option (ENABLE_SERVER_AS_PROCESS_IN_TEST "Enable server as process" OFF) +option(ENABLE_LIBUNWIND "Enable libunwind" ON) +option(ENABLE_TCMALLOC "Enable tcmalloc extensions" ON) +option(ENABLE_JEMALLOC "Enable jemalloc extensions" ON) +option(ENABLE_ROCKSDB "Enable rocksdb storage" ON) +option(ENABLE_GRPC "Enable GRPC service" OFF) +option(ENABLE_SSE "Enable SSE instructions" ON) +option(ENABLE_SERVER_AS_PROCESS_IN_TEST "Enable server as process" OFF) -if (NOT GRPC_PACKAGE_PROVIDER) - set (GRPC_PACKAGE_PROVIDER "CONFIG") -endif () +if(NOT GRPC_PACKAGE_PROVIDER) + set(GRPC_PACKAGE_PROVIDER "CONFIG") +endif() -if (WIN32) - option (LINK_RESOURCES "Link web resources as binary data" OFF) +if(WIN32) + option(LINK_RESOURCES "Link web resources as binary data" OFF) else() - option (LINK_RESOURCES "Link web resources as binary data" ON) + option(LINK_RESOURCES "Link web resources as binary data" ON) endif() -set (REINDEXER_VERSION_DEFAULT "3.25.0") +set(REINDEXER_VERSION_DEFAULT "3.25.0") if(NOT CMAKE_BUILD_TYPE) set(CMAKE_BUILD_TYPE "RelWithDebInfo") endif() enable_testing() -include(GNUInstallDirs) +include (GNUInstallDirs) project(reindexer) -set (CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} ${PROJECT_SOURCE_DIR}/cmake/modules ) +set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} ${PROJECT_SOURCE_DIR}/cmake/modules ) include (CheckLinkerFlag) include (TargetArch) target_architecture(COMPILER_TARGET_ARCH) # Configure compile options -if (MSVC) +if(MSVC) set(CMAKE_CXX_FLAGS_RELWITHDEBINFO "-O2 -Zi") set(CMAKE_C_FLAGS_RELWITHDEBINFO "-O2 -Zi") set(CMAKE_CXX_FLAGS_RELEASE "-O2 -DNDEBUG -Zi") set(CMAKE_C_FLAGS_RELEASE "-O2 -DNDEBUG -Zi") -else () +else() set(CMAKE_CXX_FLAGS_RELWITHDEBINFO "-O3 -g1") set(CMAKE_C_FLAGS_RELWITHDEBINFO "-O3 -g1") set(CMAKE_CXX_FLAGS_RELEASE "-O3 -DNDEBUG") set(CMAKE_C_FLAGS_RELEASE "-O3 -DNDEBUG") -endif () -if (${COMPILER_TARGET_ARCH} STREQUAL "e2k") +endif() +if(${COMPILER_TARGET_ARCH} STREQUAL "e2k") set(CMAKE_CXX_FLAGS_RELWITHDEBINFO "-O3 -g0") add_definitions(-D__E2K__) add_definitions(-D__LCC__) -endif () +endif() -if (NOT MSVC AND NOT APPLE) +if(NOT MSVC AND NOT APPLE) check_linker_flag (-gz cxx_linker_supports_gz) - if (cxx_linker_supports_gz) - set 
(CMAKE_EXE_LINKER_FLAGS_RELWITHDEBINFO "${CMAKE_EXE_LINKER_FLAGS_RELWITHDEBINFO} -gz") - endif () -endif () - -if (MSVC) - set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -wd4244 -wd4267 -wd4996 -wd4717 -MP") - set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -wd4244 -wd4267 -wd4996 -wd4717 -wd4800 -wd4396 -wd4503 -MP") - set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -SAFESEH:NO") + if(cxx_linker_supports_gz) + set(CMAKE_EXE_LINKER_FLAGS_RELWITHDEBINFO "${CMAKE_EXE_LINKER_FLAGS_RELWITHDEBINFO} -gz") + endif() +endif() + +if(MSVC) + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -wd4244 -wd4267 -wd4996 -wd4717 -MP") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -wd4244 -wd4267 -wd4996 -wd4717 -wd4800 -wd4396 -wd4503 -MP") + set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -SAFESEH:NO") else() - set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wall -Wextra -Werror -Wswitch-enum") - set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++17 -Wall -Wextra -Werror -Wswitch-enum -Wold-style-cast -fexceptions") - if (${COMPILER_TARGET_ARCH} STREQUAL "e2k") - set (CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -gline -fverbose-asm") - set (CMAKE_CXX_FLAGS "${CMAKE_C_FLAGS} -Wno-unused-parameter") + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wall -Wextra -Werror -Wswitch-enum") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++17 -Wall -Wextra -Werror -Wswitch-enum -Wold-style-cast -fexceptions") + if(${COMPILER_TARGET_ARCH} STREQUAL "e2k") + set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -gline -fverbose-asm") + set(CMAKE_CXX_FLAGS "${CMAKE_C_FLAGS} -Wno-unused-parameter") endif() -endif () +endif() + +if(WITH_LTO) + include (RxSetupLTO) +endif() -set (EXTRA_FLAGS "") +set(EXTRA_FLAGS "") -if (WITH_ASAN AND WITH_TSAN) +if(WITH_ASAN AND WITH_TSAN) message(FATAL_ERROR "You cannot use the ASAN and TSAN options at the same time, CMake will exit.") endif() -if (WITH_ASAN) - set (EXTRA_FLAGS "-fsanitize=address") +if(WITH_ASAN) + set(EXTRA_FLAGS "-fsanitize=address") add_definitions(-DREINDEX_WITH_ASAN) -elseif (WITH_TSAN) - set (EXTRA_FLAGS "-fsanitize=thread") +elseif(WITH_TSAN) + set(EXTRA_FLAGS "-fsanitize=thread") add_definitions(-DREINDEX_WITH_TSAN) -endif () -if (WITH_GCOV) - set (EXTRA_FLAGS "-fprofile-arcs -ftest-coverage") -endif () +endif() +if(WITH_GCOV) + set(EXTRA_FLAGS "-fprofile-arcs -ftest-coverage") +endif() -if (WITH_STDLIB_DEBUG) +if(WITH_STDLIB_DEBUG) add_definitions(-DRX_WITH_STDLIB_DEBUG=1) - if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU") + if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU") add_definitions(-D_GLIBCXX_DEBUG) add_definitions(-D_GLIBCXX_DEBUG_PEDANTIC) - else () - message ("Option 'WITH_STDLIB_DEBUG' was requested, but there is not such option for current toolcain: '${CMAKE_CXX_COMPILER_ID}'. Disabling...") - endif () -endif () + else() + message("Option 'WITH_STDLIB_DEBUG' was requested, but there is no such option for the current toolchain: '${CMAKE_CXX_COMPILER_ID}'. 
Disabling...") + endif() +endif() -set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${EXTRA_FLAGS}") -set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} ${EXTRA_FLAGS}") +set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${EXTRA_FLAGS}") +set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} ${EXTRA_FLAGS}") # Build project set(TARGET reindexer) @@ -133,7 +138,6 @@ set(TARGET reindexer) set(REINDEXER_SOURCE_PATH ${PROJECT_SOURCE_DIR}) set(REINDEXER_BINARY_PATH ${PROJECT_BINARY_DIR}) - file ( GLOB_RECURSE SRCS @@ -148,117 +152,117 @@ file ( ${REINDEXER_SOURCE_PATH}/coroutine/* ) -string(REGEX REPLACE "([][+*()^])" "\\\\\\1" BASE_CORO_CONTEXT_DIR "${REINDEXER_SOURCE_PATH}/vendor/koishi") -set (CONTEXT_ASM_DIR "${BASE_CORO_CONTEXT_DIR}/fcontext/asm") +string (REGEX REPLACE "([][+*()^])" "\\\\\\1" BASE_CORO_CONTEXT_DIR "${REINDEXER_SOURCE_PATH}/vendor/koishi") +set(CONTEXT_ASM_DIR "${BASE_CORO_CONTEXT_DIR}/fcontext/asm") list(FILTER SRCS EXCLUDE REGEX "${BASE_CORO_CONTEXT_DIR}/.*" ) -if ( UNIX ) - enable_language(ASM) - if (APPLE) - if ( ${COMPILER_TARGET_ARCH} STREQUAL "arm" ) - list (APPEND CONTEXT_ASM_SRCS +if( UNIX ) + enable_language (ASM) + if(APPLE) + if( ${COMPILER_TARGET_ARCH} STREQUAL "arm" ) + list(APPEND CONTEXT_ASM_SRCS ${CONTEXT_ASM_DIR}/jump_arm_aapcs_macho_gas.S ${CONTEXT_ASM_DIR}/make_arm_aapcs_macho_gas.S ) - elseif ( ${COMPILER_TARGET_ARCH} STREQUAL "arm64" OR ${COMPILER_TARGET_ARCH} STREQUAL "aarch64" ) - list (APPEND CONTEXT_ASM_SRCS + elseif( ${COMPILER_TARGET_ARCH} STREQUAL "arm64" OR ${COMPILER_TARGET_ARCH} STREQUAL "aarch64" ) + list(APPEND CONTEXT_ASM_SRCS ${CONTEXT_ASM_DIR}/jump_arm64_aapcs_macho_gas.S ${CONTEXT_ASM_DIR}/make_arm64_aapcs_macho_gas.S ) - elseif ( ${COMPILER_TARGET_ARCH} STREQUAL "x86_64") - list (APPEND CONTEXT_ASM_SRCS + elseif( ${COMPILER_TARGET_ARCH} STREQUAL "x86_64") + list(APPEND CONTEXT_ASM_SRCS ${CONTEXT_ASM_DIR}/jump_x86_64_sysv_macho_gas.S ${CONTEXT_ASM_DIR}/make_x86_64_sysv_macho_gas.S ) - elseif ( ${COMPILER_TARGET_ARCH} STREQUAL "i386") - list (APPEND CONTEXT_ASM_SRCS + elseif( ${COMPILER_TARGET_ARCH} STREQUAL "i386") + list(APPEND CONTEXT_ASM_SRCS ${CONTEXT_ASM_DIR}/jump_i386_sysv_macho_gas.S ${CONTEXT_ASM_DIR}/make_i386_sysv_macho_gas.S ) - else () - message (FATAL_ERROR "Unsupported APPLE-platform architecture: ${COMPILER_TARGET_ARCH}. Unable to chose context sources") - endif () - else () - if ( ${COMPILER_TARGET_ARCH} STREQUAL "arm" ) - list (APPEND CONTEXT_ASM_SRCS + else() + message(FATAL_ERROR "Unsupported APPLE-platform architecture: ${COMPILER_TARGET_ARCH}. 
Unable to choose context sources") + endif() + else() + if( ${COMPILER_TARGET_ARCH} STREQUAL "arm" ) + list(APPEND CONTEXT_ASM_SRCS ${CONTEXT_ASM_DIR}/jump_arm_aapcs_elf_gas.S ${CONTEXT_ASM_DIR}/make_arm_aapcs_elf_gas.S ) - elseif ( ${COMPILER_TARGET_ARCH} STREQUAL "arm64" OR ${COMPILER_TARGET_ARCH} STREQUAL "aarch64" ) - list (APPEND CONTEXT_ASM_SRCS + elseif( ${COMPILER_TARGET_ARCH} STREQUAL "arm64" OR ${COMPILER_TARGET_ARCH} STREQUAL "aarch64" ) + list(APPEND CONTEXT_ASM_SRCS ${CONTEXT_ASM_DIR}/jump_arm64_aapcs_elf_gas.S ${CONTEXT_ASM_DIR}/make_arm64_aapcs_elf_gas.S ) - elseif ( ${COMPILER_TARGET_ARCH} STREQUAL "x86_64") - list (APPEND CONTEXT_ASM_SRCS + elseif( ${COMPILER_TARGET_ARCH} STREQUAL "x86_64") + list(APPEND CONTEXT_ASM_SRCS ${CONTEXT_ASM_DIR}/jump_x86_64_sysv_elf_gas.S ${CONTEXT_ASM_DIR}/make_x86_64_sysv_elf_gas.S ) - elseif ( ${COMPILER_TARGET_ARCH} STREQUAL "i386") - list (APPEND CONTEXT_ASM_SRCS + elseif( ${COMPILER_TARGET_ARCH} STREQUAL "i386") + list(APPEND CONTEXT_ASM_SRCS ${CONTEXT_ASM_DIR}/jump_i386_sysv_elf_gas.S ${CONTEXT_ASM_DIR}/make_i386_sysv_elf_gas.S ) - elseif (NOT ${COMPILER_TARGET_ARCH} STREQUAL "e2k") - message (FATAL_ERROR "Unsupported Linux-platform architecture: ${COMPILER_TARGET_ARCH}. Unable to chose context sources") - endif () - endif () -elseif (WIN32) - if (MINGW) + elseif(NOT ${COMPILER_TARGET_ARCH} STREQUAL "e2k") + message(FATAL_ERROR "Unsupported Linux-platform architecture: ${COMPILER_TARGET_ARCH}. Unable to choose context sources") + endif() + endif() +elseif(WIN32) + if(MINGW) enable_language(ASM) - if ( ${COMPILER_TARGET_ARCH} STREQUAL "x86_64") - list (APPEND CONTEXT_ASM_SRCS + if( ${COMPILER_TARGET_ARCH} STREQUAL "x86_64") + list(APPEND CONTEXT_ASM_SRCS ${CONTEXT_ASM_DIR}/jump_x86_64_ms_pe_clang_gas.S ${CONTEXT_ASM_DIR}/make_x86_64_ms_pe_clang_gas.S ) - elseif ( ${COMPILER_TARGET_ARCH} STREQUAL "i386") - list (APPEND CONTEXT_ASM_SRCS + elseif( ${COMPILER_TARGET_ARCH} STREQUAL "i386") + list(APPEND CONTEXT_ASM_SRCS ${CONTEXT_ASM_DIR}/jump_i386_ms_pe_clang_gas.S ${CONTEXT_ASM_DIR}/make_i386_ms_pe_clang_gas.S ) - else () - message (FATAL_ERROR "Unsupported WIN-platform architecture: ${COMPILER_TARGET_ARCH}. Unable to chose context sources") - endif () - else () - enable_language(ASM_MASM) - if ( ${COMPILER_TARGET_ARCH} STREQUAL "x86_64") - list (APPEND CONTEXT_ASM_SRCS + else() + message(FATAL_ERROR "Unsupported WIN-platform architecture: ${COMPILER_TARGET_ARCH}. Unable to choose context sources") + endif() + else() + enable_language (ASM_MASM) + if( ${COMPILER_TARGET_ARCH} STREQUAL "x86_64") + list(APPEND CONTEXT_ASM_SRCS ${CONTEXT_ASM_DIR}/jump_x86_64_ms_pe_masm.asm ${CONTEXT_ASM_DIR}/make_x86_64_ms_pe_masm.asm ) - elseif ( ${COMPILER_TARGET_ARCH} STREQUAL "i386") - list (APPEND CONTEXT_ASM_SRCS + elseif( ${COMPILER_TARGET_ARCH} STREQUAL "i386") + list(APPEND CONTEXT_ASM_SRCS ${CONTEXT_ASM_DIR}/jump_i386_ms_pe_masm.asm ${CONTEXT_ASM_DIR}/make_i386_ms_pe_masm.asm ) - else () - message (FATAL_ERROR "Unsupported WIN-platform architecture: ${COMPILER_TARGET_ARCH}. Unable to chose context sources") - endif () - endif () -else () - message (FATAL_ERROR "Unsupported platform. Unable to chose context sources") -endif () + else() + message(FATAL_ERROR "Unsupported WIN-platform architecture: ${COMPILER_TARGET_ARCH}. Unable to choose context sources") + endif() + endif() +else() + message(FATAL_ERROR "Unsupported platform. 
Unable to choose context sources") +endif() list(APPEND SRCS ${CONTEXT_ASM_SRCS}) -if (ENABLE_SSE) - if (NOT MSVC AND NOT APPLE AND (${COMPILER_TARGET_ARCH} STREQUAL "x86_64" OR ${COMPILER_TARGET_ARCH} STREQUAL "i386")) +if(ENABLE_SSE) + if(NOT MSVC AND NOT APPLE AND (${COMPILER_TARGET_ARCH} STREQUAL "x86_64" OR ${COMPILER_TARGET_ARCH} STREQUAL "i386")) add_definitions(-DREINDEXER_WITH_SSE=1) - message ("Building with SSE support...") - set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -msse -msse2 -msse3 -mssse3 -msse4 -msse4.1 -msse4.2 -mpopcnt") - set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -msse -msse2 -msse3 -mssse3 -msse4 -msse4.1 -msse4.2 -mpopcnt") - else () - message ("SSE compiler flags were disabled for the current platform") - endif () -endif () + message("Building with SSE support...") + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -msse -msse2 -msse3 -mssse3 -msse4 -msse4.1 -msse4.2 -mpopcnt") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -msse -msse2 -msse3 -mssse3 -msse4 -msse4.1 -msse4.2 -mpopcnt") + else() + message("SSE compiler flags were disabled for the current platform") + endif() +endif() include_directories(${REINDEXER_SOURCE_PATH}) include_directories(${REINDEXER_SOURCE_PATH}/vendor) -set (MSGPACK_INCLUDE_PATH ${REINDEXER_SOURCE_PATH}/vendor/msgpack) +set(MSGPACK_INCLUDE_PATH ${REINDEXER_SOURCE_PATH}/vendor/msgpack) include_directories(${MSGPACK_INCLUDE_PATH}) -set (KOISHI_PATH ${REINDEXER_SOURCE_PATH}/vendor/koishi) -if (CMAKE_GENERATOR MATCHES "Visual Studio") +set(KOISHI_PATH ${REINDEXER_SOURCE_PATH}/vendor/koishi) +if(CMAKE_GENERATOR MATCHES "Visual Studio") add_definitions("-DKOISHI_THREAD_LOCAL=__declspec(thread)") else() add_definitions(-DKOISHI_THREAD_LOCAL=_Thread_local) @@ -280,14 +284,14 @@ list(APPEND SRCS ${KOISHI_PATH}/include/koishi.h ${KOISHI_PATH}/stack_alloc.c ${KOISHI_PATH}/stack_alloc.h ) -if (${COMPILER_TARGET_ARCH} STREQUAL "e2k") +if(${COMPILER_TARGET_ARCH} STREQUAL "e2k") list(APPEND SRCS ${KOISHI_PATH}/ucontext_e2k/ucontext_e2k.c) else() list(APPEND SRCS ${KOISHI_PATH}/fcontext/fcontext.c ${KOISHI_PATH}/fcontext/fcontext.hpp) endif() # Static LevelDB v1.23 is built with -fno-rtti by default. 
To inherit our logger from leveldb's logger, this file must be built with -fno-rtti to -set_source_files_properties(${REINDEXER_SOURCE_PATH}/core/storage/leveldblogger.cc PROPERTIES COMPILE_FLAGS "-fno-rtti") +set_source_files_properties (${REINDEXER_SOURCE_PATH}/core/storage/leveldblogger.cc PROPERTIES COMPILE_FLAGS "-fno-rtti") list(APPEND REINDEXER_LIBRARIES reindexer) add_library(${TARGET} STATIC ${HDRS} ${SRCS} ${VENDORS}) @@ -302,40 +306,40 @@ add_subdirectory(server/contrib) # tcmalloc ########## -if (NOT WITH_ASAN AND NOT WITH_TSAN) +if(NOT WITH_ASAN AND NOT WITH_TSAN) # tmalloc conflict with sanitizers, so disable it for sanitized builds - if (ENABLE_TCMALLOC) + if(ENABLE_TCMALLOC) find_package(Gperftools) if(GPERFTOOLS_TCMALLOC) include_directories(SYSTEM ${GPERFTOOLS_INCLUDE_DIR}) add_definitions(-DREINDEX_WITH_GPERFTOOLS=1) - list (APPEND REINDEXER_LIBRARIES ${GPERFTOOLS_LIBRARIES}) + list(APPEND REINDEXER_LIBRARIES ${GPERFTOOLS_LIBRARIES}) endif() endif() - if (NOT GPERFTOOLS_TCMALLOC AND ENABLE_JEMALLOC) + if(NOT GPERFTOOLS_TCMALLOC AND ENABLE_JEMALLOC) find_package(Jemalloc) if(JEMALLOC_FOUND) include_directories(SYSTEM ${JEMALLOC_INCLUDE_DIR}) add_definitions(-DREINDEX_WITH_JEMALLOC=1) - list (APPEND REINDEXER_LIBRARIES ${JEMALLOC_LIBRARY}) + list(APPEND REINDEXER_LIBRARIES ${JEMALLOC_LIBRARY}) endif() - endif () -endif () + endif() +endif() # snappy ######## -if (NOT WITH_TSAN) +if(NOT WITH_TSAN) find_package(Snappy) endif() -if (SNAPPY_FOUND) +if(SNAPPY_FOUND) include_directories(SYSTEM ${SNAPPY_INCLUDE_DIR}) list(APPEND REINDEXER_LIBRARIES ${SNAPPY_LIBRARIES}) -else () - if (WITH_TSAN) - message (STATUS "Snappy will be downloaded from Github to avoid false-positive warnings from TSAN") +else() + if(WITH_TSAN) + message(STATUS "Snappy will be downloaded from Github to avoid false-positive warnings from TSAN") else() - message (STATUS "Snappy not found. Will download it") + message(STATUS "Snappy not found. Will download it") endif() ExternalProject_Add( snappy_lib @@ -344,59 +348,59 @@ else () CMAKE_ARGS -DSNAPPY_BUILD_TESTS=OFF -DCMAKE_INSTALL_PREFIX=${CMAKE_CURRENT_BINARY_DIR} -DCMAKE_INSTALL_LIBDIR=${CMAKE_CURRENT_BINARY_DIR} ) - include_directories (${CMAKE_CURRENT_BINARY_DIR}/include) + include_directories(${CMAKE_CURRENT_BINARY_DIR}/include) link_directories(${CMAKE_CURRENT_BINARY_DIR}) list(APPEND REINDEXER_LIBRARIES snappy) -endif () +endif() # storage ######### # rocksdb -if (ENABLE_ROCKSDB) +if(ENABLE_ROCKSDB) if(GPERFTOOLS_TCMALLOC AND NOT WIN32 AND WITH_PYTHON) - message (STATUS "Python connector is incompatible with both tcmalloc and RocksDB enabled. Disabling RocksDB") + message(STATUS "Python connector is incompatible with both tcmalloc and RocksDB enabled. Disabling RocksDB") else() - if (WITH_PYTHON) + if(WITH_PYTHON) # librocksdb usually compiles without -fPIC, so it can't be linked to another shared library (i.e. 
to reindexer python bindings) - set (RocksDB_NAMES librocksdb.so) + set(RocksDB_NAMES librocksdb.so) elseif(GPERFTOOLS_TCMALLOC AND NOT WIN32) # shared version of rocksdb can conflict with tcmalloc, so force static version of rocksdb, if tcmalloc enabled - set (RocksDB_NAMES librocksdb.a) + set(RocksDB_NAMES librocksdb.a) endif() find_library(RocksDB_LIBRARY NAMES ${RocksDB_NAMES} rocksdb HINTS $ENV{ROCKSDB_ROOT}/lib) find_path(RocksDB_INCLUDE_DIR NAMES rocksdb/db.h HINTS $ENV{ROCKSDB_ROOT}/include /opt/local/include /usr/local/include /usr/include) - if (RocksDB_LIBRARY AND RocksDB_INCLUDE_DIR) - message (STATUS "Found RocksDB: ${RocksDB_LIBRARY}") + if(RocksDB_LIBRARY AND RocksDB_INCLUDE_DIR) + message(STATUS "Found RocksDB: ${RocksDB_LIBRARY}") find_library(BZ2_LIBRARY bz2 bzip2) - if (BZ2_LIBRARY) - message (STATUS "Found libbz2: ${BZ2_LIBRARY}") + if(BZ2_LIBRARY) + message(STATUS "Found libbz2: ${BZ2_LIBRARY}") list(APPEND REINDEXER_LIBRARIES ${BZ2_LIBRARY}) else() - message (STATUS "libbz2: not found") + message(STATUS "libbz2: not found") endif() find_library(LZ4_LIBRARY lz4) - if (LZ4_LIBRARY) - message (STATUS "Found liblz4: ${LZ4_LIBRARY}") + if(LZ4_LIBRARY) + message(STATUS "Found liblz4: ${LZ4_LIBRARY}") list(APPEND REINDEXER_LIBRARIES ${LZ4_LIBRARY}) else() - message (STATUS "liblz4: not found") + message(STATUS "liblz4: not found") endif() find_library(Z_LIBRARY z) - if (Z_LIBRARY) - message (STATUS "Found zlib: ${Z_LIBRARY}") + if(Z_LIBRARY) + message(STATUS "Found zlib: ${Z_LIBRARY}") list(APPEND REINDEXER_LIBRARIES ${Z_LIBRARY}) else() - message (STATUS "zlib: not found") + message(STATUS "zlib: not found") endif() find_library(ZSTD_LIBRARY zstd) - if (ZSTD_LIBRARY) - message (STATUS "Found zstdlib: ${ZSTD_LIBRARY}") + if(ZSTD_LIBRARY) + message(STATUS "Found zstdlib: ${ZSTD_LIBRARY}") list(APPEND REINDEXER_LIBRARIES ${ZSTD_LIBRARY}) else() - message (STATUS "zstdlib: not found") + message(STATUS "zstdlib: not found") endif() include_directories(SYSTEM ${RocksDB_INCLUDE_DIR}) @@ -409,19 +413,19 @@ endif() # leveldb if(GPERFTOOLS_TCMALLOC AND NOT WIN32) # shared version of leveldb can conflict with tcmalloc, so force static version of leveldb, if tcmalloc enabled - set (LevelDB_NAMES libleveldb.a) + set(LevelDB_NAMES libleveldb.a) endif() -if (NOT WITH_TSAN) +if(NOT WITH_TSAN) find_library(LevelDB_LIBRARY NAMES ${LevelDB_NAMES} leveldb HINTS $ENV{LEVELDB_ROOT}/lib) find_path(LevelDB_INCLUDE_DIR NAMES leveldb/db.h HINTS $ENV{LEVELDB_ROOT}/include /opt/local/include /usr/local/include /usr/include) endif() -if (NOT LevelDB_LIBRARY OR NOT LevelDB_INCLUDE_DIR OR WITH_TSAN) - if (WITH_TSAN) - message (STATUS "LevelDB will be downloaded from Github to avoid false-positive warnings from TSAN") - else () +if(NOT LevelDB_LIBRARY OR NOT LevelDB_INCLUDE_DIR OR WITH_TSAN) + if(WITH_TSAN) + message(STATUS "LevelDB will be downloaded from Github to avoid false-positive warnings from TSAN") + else() # Leveldb not found. Download it - message (STATUS "LevelDB not found. Will download it") + message(STATUS "LevelDB not found. 
Will download it") endif() ExternalProject_Add( leveldb_lib @@ -433,19 +437,19 @@ if (NOT LevelDB_LIBRARY OR NOT LevelDB_INCLUDE_DIR OR WITH_TSAN) -DCMAKE_EXE_LINKER_FLAGS=-L${CMAKE_CURRENT_BINARY_DIR} -DCMAKE_INSTALL_LIBDIR=${CMAKE_CURRENT_BINARY_DIR} ) - if (NOT SNAPPY_FOUND) + if(NOT SNAPPY_FOUND) add_dependencies(leveldb_lib snappy_lib) endif() - include_directories (${CMAKE_CURRENT_BINARY_DIR}/include) + include_directories(${CMAKE_CURRENT_BINARY_DIR}/include) link_directories(${CMAKE_CURRENT_BINARY_DIR}) list(APPEND REINDEXER_LINK_DIRECTORIES ${CMAKE_CURRENT_BINARY_DIR}) list(INSERT REINDEXER_LIBRARIES 1 leveldb) add_dependencies(reindexer leveldb_lib) -else () - message (STATUS "Found LevelDB: ${LevelDB_LIBRARY}") +else() + message(STATUS "Found LevelDB: ${LevelDB_LIBRARY}") include_directories(SYSTEM ${LevelDB_INCLUDE_DIR}) list(INSERT REINDEXER_LIBRARIES 1 ${LevelDB_LIBRARY}) -endif () +endif() add_definitions(-DREINDEX_WITH_LEVELDB) # System libraries @@ -453,7 +457,7 @@ set(THREADS_PREFER_PTHREAD_FLAG TRUE) find_package(Threads REQUIRED ON) list(APPEND REINDEXER_LIBRARIES ${CMAKE_THREAD_LIBS_INIT} ) -if (WITH_CPPTRACE) +if(WITH_CPPTRACE) ExternalProject_Add( cpptrace_lib GIT_REPOSITORY "https://github.com/jeremy-rifkin/cpptrace.git" @@ -473,14 +477,14 @@ endif() # librt find_library(LIBRT rt) if(LIBRT) - list (APPEND REINDEXER_LIBRARIES ${LIBRT}) + list(APPEND REINDEXER_LIBRARIES ${LIBRT}) endif() -if (NOT WIN32) +if(NOT WIN32) # libdl find_library(LIBDL dl) if(LIBDL) - list (APPEND REINDEXER_LIBRARIES ${LIBDL}) + list(APPEND REINDEXER_LIBRARIES ${LIBDL}) add_definitions(-DREINDEX_WITH_LIBDL=1) endif() endif() @@ -491,83 +495,83 @@ list(APPEND CMAKE_REQUIRED_DEFINITIONS -D_GNU_SOURCE) check_symbol_exists (_Unwind_Backtrace unwind.h HAVE_BACKTRACE) check_symbol_exists (_Unwind_GetIPInfo unwind.h HAVE_GETIPINFO) list(REMOVE_ITEM CMAKE_REQUIRED_DEFINITIONS -D_GNU_SOURCE) -if (HAVE_BACKTRACE AND HAVE_GETIPINFO) - set (SYSUNWIND On) - message ("-- Found system unwind") +if(HAVE_BACKTRACE AND HAVE_GETIPINFO) + set(SYSUNWIND On) + message("-- Found system unwind") add_definitions(-DREINDEX_WITH_UNWIND=1) endif() # libunwind -if (ENABLE_LIBUNWIND) +if(ENABLE_LIBUNWIND) find_library(LIBUNWIND unwind) if(LIBUNWIND) - list (APPEND REINDEXER_LIBRARIES ${LIBUNWIND} ) + list(APPEND REINDEXER_LIBRARIES ${LIBUNWIND} ) find_path(LIBUNWIND_INCLUDE_PATH libunwind.h) - if (LIBUNWIND_INCLUDE_PATH) + if(LIBUNWIND_INCLUDE_PATH) add_definitions(-DREINDEX_WITH_LIBUNWIND=1) - message ("-- Found Libunwind: ${LIBUNWIND} ${LIBUNWIND_INCLUDE_PATH}") + message("-- Found Libunwind: ${LIBUNWIND} ${LIBUNWIND_INCLUDE_PATH}") endif() endif() -endif () +endif() if(APPLE OR (NOT LIBUNWIND AND NOT SYSUNWIND)) # Try execinfo find_path(EXECINFO_INCLUDE_PATH execinfo.h) - if (EXECINFO_INCLUDE_PATH) - message ("-- Found execinfo.h: ${EXECINFO_INCLUDE_PATH}") + if(EXECINFO_INCLUDE_PATH) + message("-- Found execinfo.h: ${EXECINFO_INCLUDE_PATH}") add_definitions(-DREINDEX_WITH_EXECINFO=1) find_library(LIBEXECINFO execinfo) if(LIBEXECINFO) - list (APPEND REINDEXER_LIBRARIES ${LIBEXECINFO}) + list(APPEND REINDEXER_LIBRARIES ${LIBEXECINFO}) endif() endif() -endif () +endif() find_library(MUSL ld-musl-x86_64.so.1) -if (MUSL) - message ("-- Found musl, will override abort and assert_fail to fix stacktraces") +if(MUSL) + message("-- Found musl, will override abort and assert_fail to fix stacktraces") add_definitions(-DREINDEX_OVERRIDE_ABORT=1) - set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fno-omit-frame-pointer") - set (CMAKE_CXX_FLAGS 
"${CMAKE_CXX_FLAGS} -fno-omit-frame-pointer") + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fno-omit-frame-pointer") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fno-omit-frame-pointer") endif() -if (APPLE) - add_definitions (-DREINDEX_WITH_APPLE_SYMBOLICATION=1) +if(APPLE) + add_definitions(-DREINDEX_WITH_APPLE_SYMBOLICATION=1) endif() -if (WIN32) +if(WIN32) list(APPEND REINDEXER_LIBRARIES shlwapi dbghelp ws2_32) -endif () +endif() -set (REINDEXER_LIBRARIES_GLOBAL ${REINDEXER_LIBRARIES} PARENT_SCOPE) -set (REINDEXER_LINK_DIRECTORIES_GLOBAL ${REINDEXER_LINK_DIRECTORIES} PARENT_SCOPE) +set(REINDEXER_LIBRARIES_GLOBAL ${REINDEXER_LIBRARIES} PARENT_SCOPE) +set(REINDEXER_LINK_DIRECTORIES_GLOBAL ${REINDEXER_LINK_DIRECTORIES} PARENT_SCOPE) # Get version string if(EXISTS ${PROJECT_SOURCE_DIR}/.git OR EXISTS ${PROJECT_SOURCE_DIR}/../.git ) execute_process(WORKING_DIRECTORY ${REINDEXER_SOURCE_PATH} COMMAND git describe --tags OUTPUT_VARIABLE REINDEXER_VERSION_FULL OUTPUT_STRIP_TRAILING_WHITESPACE) -endif () +endif() -if (NOT REINDEXER_VERSION_FULL) -set (REINDEXER_VERSION_FULL ${REINDEXER_VERSION_DEFAULT}) -endif () +if(NOT REINDEXER_VERSION_FULL) +set(REINDEXER_VERSION_FULL ${REINDEXER_VERSION_DEFAULT}) +endif() -set (REINDEXER_VERSION_H "#pragma once\n#define REINDEX_VERSION \"${REINDEXER_VERSION_FULL}\"\n") +set(REINDEXER_VERSION_H "#pragma once\n#define REINDEX_VERSION \"${REINDEXER_VERSION_FULL}\"\n") if(EXISTS ${PROJECT_BINARY_DIR}/reindexer_version.h) file(READ ${PROJECT_BINARY_DIR}/reindexer_version.h REINDEXER_VERSION_CUR_H) -endif () +endif() -if (NOT REINDEXER_VERSION_CUR_H STREQUAL REINDEXER_VERSION_H) +if(NOT REINDEXER_VERSION_CUR_H STREQUAL REINDEXER_VERSION_H) file(WRITE ${PROJECT_BINARY_DIR}/reindexer_version.h ${REINDEXER_VERSION_H}) -endif () +endif() include_directories(${PROJECT_BINARY_DIR}) string ( REGEX REPLACE "(.*)([0-9]+)\\.([0-9]+)\\.([0-9]+)(.*)" "\\2.\\3.\\4" REINDEXER_VERSION ${REINDEXER_VERSION_FULL}) string ( REGEX REPLACE ".*-([0-9]+)-(.*)" "\\1.\\2" REINDEXER_RELEASE ${REINDEXER_VERSION_FULL}) -set (REINDEXER_VERSION_REDUCED ${REINDEXER_VERSION}) -if (CMAKE_MATCH_1) - set (REINDEXER_VERSION ${REINDEXER_VERSION}.${REINDEXER_RELEASE}) +set(REINDEXER_VERSION_REDUCED ${REINDEXER_VERSION}) +if(CMAKE_MATCH_1) + set(REINDEXER_VERSION ${REINDEXER_VERSION}.${REINDEXER_RELEASE}) endif() # Packing and installation @@ -582,21 +586,21 @@ add_subdirectory(doc) # Tests and benchmarks find_package(GTest) -if (GTEST_FOUND) +if(GTEST_FOUND) include_directories(SYSTEM ${GTEST_INCLUDE_DIR}) add_subdirectory(gtests/tests) endif() find_package(benchmark) -if (benchmark_FOUND) - if (${benchmark_VERSION_MAJOR} EQUAL "1" AND ${benchmark_VERSION_MINOR} GREATER_EQUAL "5" AND ${benchmark_VERSION_MINOR} LESS_EQUAL "7") +if(benchmark_FOUND) + if(${benchmark_VERSION_MAJOR} EQUAL "1" AND ${benchmark_VERSION_MINOR} GREATER_EQUAL "5" AND ${benchmark_VERSION_MINOR} LESS_EQUAL "7") find_package(GBenchmark) - if (GBENCHMARK_FOUND) + if(GBENCHMARK_FOUND) include_directories(SYSTEM ${GBENCHMARK_INCLUDE_DIR}) add_subdirectory(gtests/bench) endif() else() - message (STATUS "Unsupported Google benchmark version: ${benchmark_VERSION}. Only versions 1.5.x-1.7.x are supported") + message(STATUS "Unsupported Google benchmark version: ${benchmark_VERSION}. 
Only versions 1.5.x-1.7.x are supported") endif() endif() @@ -607,3 +611,8 @@ add_custom_target(collect_coverage COMMAND genhtml coverage_filtered.info -o coverage_output COMMENT "Collecting Reindexer coverage" ) + +# Extra compile options +if(MSVC) + target_compile_options(${TARGET} PRIVATE /bigobj) +endif() diff --git a/cpp_src/cmake/modules/FindSnappy.cmake b/cpp_src/cmake/modules/FindSnappy.cmake index 0a7fbe235..46c8dd992 100644 --- a/cpp_src/cmake/modules/FindSnappy.cmake +++ b/cpp_src/cmake/modules/FindSnappy.cmake @@ -60,20 +60,20 @@ find_library( if (SNAPPY_INCLUDE_DIR AND SNAPPY_LIBRARY) set(SNAPPY_FOUND TRUE) set( SNAPPY_LIBRARIES ${SNAPPY_LIBRARY} ) -else () +else() set(SNAPPY_FOUND FALSE) set( SNAPPY_LIBRARIES ) -endif () +endif() if (SNAPPY_FOUND) message(STATUS "Found Snappy: ${SNAPPY_LIBRARY}") -else () +else() message(STATUS "Not Found Snappy: ${SNAPPY_LIBRARY}") if (SNAPPY_FIND_REQUIRED) message(STATUS "Looked for Snappy libraries named ${SNAPPY_NAMES}.") message(FATAL_ERROR "Could NOT find Snappy library") - endif () -endif () + endif() +endif() mark_as_advanced( SNAPPY_LIBRARY diff --git a/cpp_src/cmake/modules/RxPrepareCpackDeps.cmake b/cpp_src/cmake/modules/RxPrepareCpackDeps.cmake index cc1c6cb95..152aa8222 100644 --- a/cpp_src/cmake/modules/RxPrepareCpackDeps.cmake +++ b/cpp_src/cmake/modules/RxPrepareCpackDeps.cmake @@ -1,28 +1,28 @@ # Packaging and install stuff for the RPM/DEB/TGZ package if(CMAKE_SYSTEM_NAME MATCHES "Linux" AND EXISTS "/etc/issue") file(READ "/etc/issue" LINUX_ISSUE) -endif () +endif() if(CMAKE_SYSTEM_NAME MATCHES "Linux" AND EXISTS "/etc/os-release") file(READ "/etc/os-release" LINUX_ISSUE) -endif () +endif() set(CPACK_GENERATOR "TGZ") if (WIN32) set (CPACK_GENERATOR "NSIS") -elseif (LINUX_ISSUE MATCHES "Fedora" OR LINUX_ISSUE MATCHES "CentOS" OR LINUX_ISSUE MATCHES "Mandriva" +elseif(LINUX_ISSUE MATCHES "Fedora" OR LINUX_ISSUE MATCHES "CentOS" OR LINUX_ISSUE MATCHES "Mandriva" OR LINUX_ISSUE MATCHES "RED OS") set(CPACK_GENERATOR "RPM") set(CPACK_PACKAGE_RELOCATABLE OFF) -elseif (LINUX_ISSUE MATCHES "altlinux") +elseif(LINUX_ISSUE MATCHES "altlinux") set(CPACK_GENERATOR "RPM") set(CPACK_PACKAGE_RELOCATABLE OFF) set(RPM_EXTRA_LIB_PREFIX "lib") -elseif (LINUX_ISSUE MATCHES "Ubuntu" OR LINUX_ISSUE MATCHES "Debian" OR LINUX_ISSUE MATCHES "Mint") +elseif(LINUX_ISSUE MATCHES "Ubuntu" OR LINUX_ISSUE MATCHES "Debian" OR LINUX_ISSUE MATCHES "Mint") set(CPACK_GENERATOR "DEB") endif() -message ("Target cpack package type was detected as '${RxPrepareCpackDeps}'") +message("Target cpack package type was detected as '${CPACK_GENERATOR}'") SET(CPACK_PACKAGE_NAME "reindexer") SET(CPACK_PACKAGE_DESCRIPTION_SUMMARY "ReindexerDB server package") @@ -35,7 +35,7 @@ set(CPACK_RPM_COMPONENT_INSTALL ON) set(CPACK_DEB_COMPONENT_INSTALL ON) if (WIN32) set(CPACK_SET_DESTDIR OFF) -else () +else() set(CPACK_SET_DESTDIR ON) endif() @@ -49,12 +49,12 @@ set (CPACK_RPM_PACKAGE_REQUIRES_PRE "") if (LevelDB_LIBRARY) SET(CPACK_DEBIAN_PACKAGE_DEPENDS "${CPACK_DEBIAN_PACKAGE_DEPENDS},libleveldb-dev") SET(CPACK_RPM_PACKAGE_REQUIRES_PRE "${CPACK_RPM_PACKAGE_REQUIRES_PRE},${RPM_EXTRA_LIB_PREFIX}leveldb") -endif () +endif() if (RocksDB_LIBRARY) SET(CPACK_DEBIAN_PACKAGE_DEPENDS "${CPACK_DEBIAN_PACKAGE_DEPENDS},librocksdb-dev") SET(CPACK_RPM_PACKAGE_REQUIRES_PRE "${CPACK_RPM_PACKAGE_REQUIRES_PRE},${RPM_EXTRA_LIB_PREFIX}rocksdb") -endif () +endif() if (Z_LIBRARY) SET(CPACK_DEBIAN_PACKAGE_DEPENDS "${CPACK_DEBIAN_PACKAGE_DEPENDS},zlib1g-dev") @@ -74,7 +74,7 @@ endif() if 
(SNAPPY_FOUND) SET(CPACK_DEBIAN_PACKAGE_DEPENDS "${CPACK_DEBIAN_PACKAGE_DEPENDS},libsnappy-dev") SET(CPACK_RPM_PACKAGE_REQUIRES_PRE "${CPACK_RPM_PACKAGE_REQUIRES_PRE},${RPM_EXTRA_LIB_PREFIX}snappy") -endif () +endif() if (LIBUNWIND) SET(CPACK_DEBIAN_PACKAGE_DEPENDS "${CPACK_DEBIAN_PACKAGE_DEPENDS},libunwind-dev") @@ -84,18 +84,18 @@ if (GPERFTOOLS_TCMALLOC) SET(CPACK_DEBIAN_PACKAGE_DEPENDS "${CPACK_DEBIAN_PACKAGE_DEPENDS},libgoogle-perftools4") if (LINUX_ISSUE MATCHES "altlinux") SET(CPACK_RPM_PACKAGE_REQUIRES_PRE "${CPACK_RPM_PACKAGE_REQUIRES_PRE},gperftools") - else () + else() SET(CPACK_RPM_PACKAGE_REQUIRES_PRE "${CPACK_RPM_PACKAGE_REQUIRES_PRE},gperftools-libs") - endif () -endif () + endif() +endif() # Remove first ',' from list of dependencies if (CPACK_DEBIAN_PACKAGE_DEPENDS STREQUAL "") set (CPACK_DEBIAN_DEV_PACKAGE_DEPENDS "libleveldb-dev") -else () +else() string (SUBSTRING "${CPACK_DEBIAN_PACKAGE_DEPENDS}" 1 -1 CPACK_DEBIAN_PACKAGE_DEPENDS) set (CPACK_DEBIAN_DEV_PACKAGE_DEPENDS "libleveldb-dev,${CPACK_DEBIAN_PACKAGE_DEPENDS}") -endif () +endif() if (CPACK_RPM_PACKAGE_REQUIRES_PRE STREQUAL "") set (CPACK_RPM_DEV_PACKAGE_REQUIRES_PRE "${RPM_EXTRA_LIB_PREFIX}leveldb-devel") @@ -106,12 +106,12 @@ else() foreach (DEP ${CPACK_RPM_PACKAGE_REQUIRES_LIST}) if (NOT "${DEP}" STREQUAL "gperftools-libs") list(APPEND CPACK_RPM_DEV_PACKAGE_REQUIRES_PRE "${DEP}-devel") - else () + else() list(APPEND CPACK_RPM_DEV_PACKAGE_REQUIRES_PRE "${DEP}") - endif () + endif() endforeach (DEP) string(REPLACE ";" "," CPACK_RPM_DEV_PACKAGE_REQUIRES_PRE "${CPACK_RPM_DEV_PACKAGE_REQUIRES_PRE}") -endif () +endif() set (CPACK_DEBIAN_SERVER_FILE_NAME "DEB-DEFAULT") set (CPACK_DEBIAN_DEV_FILE_NAME "DEB-DEFAULT") diff --git a/cpp_src/cmake/modules/RxPrepareInstallFiles.cmake b/cpp_src/cmake/modules/RxPrepareInstallFiles.cmake index 4351dafde..de02c8710 100644 --- a/cpp_src/cmake/modules/RxPrepareInstallFiles.cmake +++ b/cpp_src/cmake/modules/RxPrepareInstallFiles.cmake @@ -1,33 +1,33 @@ # Prepare installation files and headers if (NOT GO_BUILTIN_EXPORT_PKG_PATH) set (GO_BUILTIN_EXPORT_PKG_PATH "${PROJECT_SOURCE_DIR}/../bindings/builtin") -endif () +endif() if (NOT GO_BUILTIN_SERVER_EXPORT_PKG_PATH) set (GO_BUILTIN_SERVER_EXPORT_PKG_PATH "${PROJECT_SOURCE_DIR}/../bindings/builtinserver") -endif () +endif() if (GO_BUILTIN_EXPORT_PKG_PATH AND NOT IS_ABSOLUTE ${GO_BUILTIN_EXPORT_PKG_PATH}) set (GO_BUILTIN_EXPORT_PKG_PATH "${CMAKE_CURRENT_SOURCE_DIR}/${GO_BUILTIN_EXPORT_PKG_PATH}") -endif () +endif() function(generate_libs_list INPUT_LIBS OUTPUT_LIST_NAME) set (flibs ${${OUTPUT_LIST_NAME}}) foreach(lib ${INPUT_LIBS}) if (${lib} MATCHES "jemalloc" OR ${lib} MATCHES "tcmalloc") - elseif (${lib} STREQUAL "-pthread") + elseif(${lib} STREQUAL "-pthread") list(APPEND flibs " -lpthread") - elseif ("${lib}" MATCHES "^\\-.*") + elseif("${lib}" MATCHES "^\\-.*") list(APPEND flibs " ${lib}") - else () + else() if (NOT "${lib}" STREQUAL "snappy" OR SNAPPY_FOUND) get_filename_component(lib ${lib} NAME_WE) string(REGEX REPLACE "^lib" "" lib ${lib}) list(APPEND flibs " -l${lib}") - else () + else() list(APPEND flibs " -l${lib}") endif() - endif () + endif() endforeach(lib) list(APPEND flibs " -lstdc++") set (${OUTPUT_LIST_NAME} ${flibs} PARENT_SCOPE) @@ -50,7 +50,7 @@ if (NOT WIN32) unset (cgo_cxx_flags) unset (cgo_c_flags) unset (cgo_ld_flags) - endif () + endif() SET(CMAKE_INSTALL_DEFAULT_COMPONENT_NAME "server") SET(DIST_INCLUDE_FILES @@ -144,5 +144,5 @@ else() @ONLY ) unset (cgo_ld_flags) - endif () -endif () + endif() +endif() 
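The generate_libs_list helper above converts the resolved library list into " -l..." linker flags for the cgo link line of the Go bindings: library paths are reduced to their base name, the "lib" prefix is stripped, "-pthread" becomes "-lpthread", other "-..." flags pass through unchanged, jemalloc/tcmalloc entries are dropped, and " -lstdc++" is appended. A minimal usage sketch in CMake; EXAMPLE_LIBS and EXAMPLE_FLAGS are illustrative names, not variables from this patch:

    # Hypothetical input: a mix of a bare name, a static archive and a flag.
    set(EXAMPLE_LIBS "leveldb;libsnappy.a;-pthread")
    set(EXAMPLE_FLAGS "")
    generate_libs_list("${EXAMPLE_LIBS}" EXAMPLE_FLAGS)
    # EXAMPLE_FLAGS now holds roughly: -lleveldb -lsnappy -lpthread -lstdc++
    message("cgo LDFLAGS: ${EXAMPLE_FLAGS}")

This is what lets the install step emit ready-to-use LDFLAGS into the builtin/builtinserver cgo packages regardless of whether the dependencies were found as shared or static libraries.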
diff --git a/cpp_src/cmake/modules/RxSetupLTO.cmake b/cpp_src/cmake/modules/RxSetupLTO.cmake new file mode 100644 index 000000000..a6f01f070 --- /dev/null +++ b/cpp_src/cmake/modules/RxSetupLTO.cmake @@ -0,0 +1,20 @@ +if(NOT CMAKE_BUILD_TYPE STREQUAL "Debug") + include(CheckIPOSupported) + check_ipo_supported(RESULT IPO_IS_SUPPORTED OUTPUT IPO_SUPPORT_OUT) + if(IPO_IS_SUPPORTED) + message("Building with LTO...") + if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU") + # CMake is unable to set the LTO threads count by itself + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -flto=4 -fno-fat-lto-objects -flto-odr-type-merging") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -flto=4 -fno-fat-lto-objects -flto-odr-type-merging") + else() + # Sets -flto=thin for Clang + set(CMAKE_INTERPROCEDURAL_OPTIMIZATION TRUE) + endif() + else() + message(WARNING "LTO was requested, but not supported: ${IPO_SUPPORT_OUT}") + endif() +else() + message(WARNING "LTO was disabled for the 'Debug' build") +endif() + diff --git a/cpp_src/cmd/reindexer_server/CMakeLists.txt b/cpp_src/cmd/reindexer_server/CMakeLists.txt index c9e97f75d..cebb83d18 100644 --- a/cpp_src/cmd/reindexer_server/CMakeLists.txt +++ b/cpp_src/cmd/reindexer_server/CMakeLists.txt @@ -26,9 +26,9 @@ add_dependencies(${TARGET} reindexer_server_library) if (APPLE) set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -Wl,-rpath,@loader_path") -elseif (NOT WIN32) +elseif(NOT WIN32) set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -Wl,-rpath,\$ORIGIN") -endif () +endif() target_link_libraries(${TARGET} ${REINDEXER_LIBRARIES}) @@ -57,7 +57,7 @@ if (NOT WIN32) set (REINDEXER_INSTALL_PREFIX \"${CMAKE_INSTALL_PREFIX}\") else() set (REINDEXER_INSTALL_PREFIX \"\${CMAKE_INSTALL_PREFIX}\") - endif () + endif() endif() configure_file(${PROJECT_SOURCE_DIR}/contrib/config.yml.in ${PROJECT_BINARY_DIR}/contrib/config.yml) ") @@ -83,8 +83,8 @@ if (WIN32) install(FILES ${MINGW_DLL_DIR}/libgcc_s_seh-1.dll DESTINATION ${CMAKE_INSTALL_BINDIR}) elseif(EXISTS ${MINGW_DLL_DIR}/libgcc_s_dw2-1.dll) install(FILES ${MINGW_DLL_DIR}/libgcc_s_dw2-1.dll DESTINATION ${CMAKE_INSTALL_BINDIR}) - else () - message (WARNING "Can't find MinGW runtime") + else() + message(WARNING "Can't find MinGW runtime") endif() endif() diff --git a/cpp_src/cmd/reindexer_server/contrib/entrypoint.sh b/cpp_src/cmd/reindexer_server/contrib/entrypoint.sh index e5e065a76..e4c2eb30f 100755 --- a/cpp_src/cmd/reindexer_server/contrib/entrypoint.sh +++ b/cpp_src/cmd/reindexer_server/contrib/entrypoint.sh @@ -28,6 +28,10 @@ if [ -n "$RX_DISABLE_NS_LEAK" ]; then RX_ARGS="$RX_ARGS --disable-ns-leak" fi +if [ -n "$RX_MAX_HTTP_REQ" ]; then + RX_ARGS="$RX_ARGS --max-http-req $RX_MAX_HTTP_REQ" +fi + if [ -z "$@" ]; then reindexer_server $RX_ARGS else diff --git a/cpp_src/cmd/reindexer_server/test/test_storage_compatibility.sh b/cpp_src/cmd/reindexer_server/test/test_storage_compatibility.sh new file mode 100755 index 000000000..d189d3841 --- /dev/null +++ b/cpp_src/cmd/reindexer_server/test/test_storage_compatibility.sh @@ -0,0 +1,195 @@ +#!/bin/bash +# Task: https://github.com/restream/reindexer/-/issues/1188 +set -e + +function KillAndRemoveServer { + local pid=$1 + kill $pid + wait $pid + yum remove -y 'reindexer*' > /dev/null +} + +function WaitForDB { + # wait until DB is loaded + set +e # disable "exit on error" so the script won't stop when DB's not loaded yet + is_connected=$(reindexer_tool --dsn $ADDRESS --command '\databases list'); + while [[ $is_connected != "test" ]] + do + sleep 2 + is_connected=$(reindexer_tool --dsn $ADDRESS 
--command '\databases list'); + done + set -e +} + +function CompareNamespacesLists { + local ns_list_actual=$1 + local ns_list_expected=$2 + local pid=$3 + + diff=$(echo ${ns_list_actual[@]} ${ns_list_expected[@]} | tr ' ' '\n' | sort | uniq -u) # compare in any order + if [ "$diff" == "" ]; then + echo "## PASS: namespaces list not changed" + else + echo "##### FAIL: namespaces list was changed" + echo "expected: $ns_list_expected" + echo "actual: $ns_list_actual" + KillAndRemoveServer $pid; + exit 1 + fi +} + +function CompareMemstats { + local actual=$1 + local expected=$2 + local pid=$3 + diff=$(echo ${actual[@]} ${expected[@]} | tr ' ' '\n' | sed 's/\(.*\),$/\1/' | sort | uniq -u) # compare in any order + if [ "$diff" == "" ]; then + echo "## PASS: memstats not changed" + else + echo "##### FAIL: memstats was changed" + echo "expected: $expected" + echo "actual: $actual" + KillAndRemoveServer $pid; + exit 1 + fi +} + + +RX_SERVER_CURRENT_VERSION_RPM="$(basename build/reindexer-*server*.rpm)" +VERSION_FROM_RPM=$(echo "$RX_SERVER_CURRENT_VERSION_RPM" | grep -o '.*server-..') +VERSION=$(echo ${VERSION_FROM_RPM: -2:1}) # one-digit version + +echo "## choose latest release rpm file" +if [ $VERSION == 3 ]; then + LATEST_RELEASE=$(python3 cpp_src/cmd/reindexer_server/test/get_last_rx_version.py -v 3) + namespaces_list_expected=$'purchase_options_ext_dict\nchild_account_recommendations\n#config\n#activitystats\nradio_channels\ncollections\n#namespaces\nwp_imports_tasks\nepg_genres\nrecom_media_items_personal\nrecom_epg_archive_default\n#perfstats\nrecom_epg_live_default\nmedia_view_templates\nasset_video_servers\nwp_tasks_schedule\nadmin_roles\n#clientsstats\nrecom_epg_archive_personal\nrecom_media_items_similars\nmenu_items\naccount_recommendations\nkaraoke_items\nmedia_items\nbanners\n#queriesperfstats\nrecom_media_items_default\nrecom_epg_live_personal\nservices\n#memstats\nchannels\nmedia_item_recommendations\nwp_tasks_tasks\nepg' +elif [ $VERSION == 4 ]; then + LATEST_RELEASE=$(python3 cpp_src/cmd/reindexer_server/test/get_last_rx_version.py -v 4) + # replicationstats ns added for v4 + namespaces_list_expected=$'purchase_options_ext_dict\nchild_account_recommendations\n#config\n#activitystats\n#replicationstats\nradio_channels\ncollections\n#namespaces\nwp_imports_tasks\nepg_genres\nrecom_media_items_personal\nrecom_epg_archive_default\n#perfstats\nrecom_epg_live_default\nmedia_view_templates\nasset_video_servers\nwp_tasks_schedule\nadmin_roles\n#clientsstats\nrecom_epg_archive_personal\nrecom_media_items_similars\nmenu_items\naccount_recommendations\nkaraoke_items\nmedia_items\nbanners\n#queriesperfstats\nrecom_media_items_default\nrecom_epg_live_personal\nservices\n#memstats\nchannels\nmedia_item_recommendations\nwp_tasks_tasks\nepg' +else + echo "Unknown version" + exit 1 +fi + +echo "## downloading latest release rpm file: $LATEST_RELEASE" +curl "http://repo.itv.restr.im/itv-api-ng/7/x86_64/$LATEST_RELEASE" --output $LATEST_RELEASE; +echo "## downloading example DB" +curl "https://git.restream.ru/MaksimKravchuk/reindexer_testdata/-/raw/master/big.zip" --output big.zip; +unzip -o big.zip # unzips into mydb_big.rxdump; + +ADDRESS="cproto://127.0.0.1:6534/" +DB_NAME="test" + +memstats_expected=$'[ +{"replication":{"data_hash":24651210926,"data_count":3}}, +{"replication":{"data_hash":6252344969,"data_count":1}}, +{"replication":{"data_hash":37734732881,"data_count":28}}, +{"replication":{"data_hash":0,"data_count":0}}, +{"replication":{"data_hash":1024095024522,"data_count":1145}}, 
+{"replication":{"data_hash":8373644068,"data_count":1315}}, +{"replication":{"data_hash":0,"data_count":0}}, +{"replication":{"data_hash":0,"data_count":0}}, +{"replication":{"data_hash":0,"data_count":0}}, +{"replication":{"data_hash":0,"data_count":0}}, +{"replication":{"data_hash":7404222244,"data_count":97}}, +{"replication":{"data_hash":94132837196,"data_count":4}}, +{"replication":{"data_hash":1896088071,"data_count":2}}, +{"replication":{"data_hash":0,"data_count":0}}, +{"replication":{"data_hash":-672103903,"data_count":33538}}, +{"replication":{"data_hash":0,"data_count":0}}, +{"replication":{"data_hash":6833710705,"data_count":1}}, +{"replication":{"data_hash":5858155773472,"data_count":4500}}, +{"replication":{"data_hash":-473221280268823592,"data_count":65448}}, +{"replication":{"data_hash":0,"data_count":0}}, +{"replication":{"data_hash":8288213744,"data_count":3}}, +{"replication":{"data_hash":0,"data_count":0}}, +{"replication":{"data_hash":0,"data_count":0}}, +{"replication":{"data_hash":354171024786967,"data_count":3941}}, +{"replication":{"data_hash":-6520334670,"data_count":35886}}, +{"replication":{"data_hash":112772074632,"data_count":281}}, +{"replication":{"data_hash":-12679568198538,"data_count":1623116}} +] +Returned 27 rows' + +echo "##### Forward compatibility test #####" + +DB_PATH=$(pwd)"/rx_db" + +echo "Database: "$DB_PATH + +echo "## installing latest release: $LATEST_RELEASE" +yum install -y $LATEST_RELEASE > /dev/null; +# run RX server with disabled logging +reindexer_server -l warning --httplog=none --rpclog=none --db $DB_PATH & +server_pid=$! +sleep 2; + +reindexer_tool --dsn $ADDRESS$DB_NAME -f mydb_big.rxdump --createdb; +sleep 1; + +namespaces_1=$(reindexer_tool --dsn $ADDRESS$DB_NAME --command '\namespaces list'); +echo $namespaces_1; +CompareNamespacesLists "${namespaces_1[@]}" "${namespaces_list_expected[@]}" $server_pid; + +memstats_1=$(reindexer_tool --dsn $ADDRESS$DB_NAME --command 'select replication.data_hash, replication.data_count from #memstats'); +CompareMemstats "${memstats_1[@]}" "${memstats_expected[@]}" $server_pid; + +KillAndRemoveServer $server_pid; + +echo "## installing current version: $RX_SERVER_CURRENT_VERSION_RPM" +yum install -y build/*.rpm > /dev/null; +reindexer_server -l0 --corelog=none --httplog=none --rpclog=none --db $DB_PATH & +server_pid=$! +sleep 2; + +WaitForDB + +namespaces_2=$(reindexer_tool --dsn $ADDRESS$DB_NAME --command '\namespaces list'); +echo $namespaces_2; +CompareNamespacesLists "${namespaces_2[@]}" "${namespaces_1[@]}" $server_pid; + +memstats_2=$(reindexer_tool --dsn $ADDRESS$DB_NAME --command 'select replication.data_hash, replication.data_count from #memstats'); +CompareMemstats "${memstats_2[@]}" "${memstats_1[@]}" $server_pid; + +KillAndRemoveServer $server_pid; +rm -rf $DB_PATH; +sleep 1; + +echo "##### Backward compatibility test #####" + +echo "## installing current version: $RX_SERVER_CURRENT_VERSION_RPM" +yum install -y build/*.rpm > /dev/null; +reindexer_server -l warning --httplog=none --rpclog=none --db $DB_PATH & +server_pid=$! 
+sleep 2; + +reindexer_tool --dsn $ADDRESS$DB_NAME -f mydb_big.rxdump --createdb; +sleep 1; + +namespaces_3=$(reindexer_tool --dsn $ADDRESS$DB_NAME --command '\namespaces list'); +echo $namespaces_3; +CompareNamespacesLists "${namespaces_3[@]}" "${namespaces_list_expected[@]}" $server_pid; + +memstats_3=$(reindexer_tool --dsn $ADDRESS$DB_NAME --command 'select replication.data_hash, replication.data_count from #memstats'); +CompareMemstats "${memstats_3[@]}" "${memstats_expected[@]}" $server_pid; + +KillAndRemoveServer $server_pid; + +echo "## installing latest release: $LATEST_RELEASE" +yum install -y $LATEST_RELEASE > /dev/null; +reindexer_server -l warning --httplog=none --rpclog=none --db $DB_PATH & +server_pid=$! +sleep 2; + +WaitForDB + +namespaces_4=$(reindexer_tool --dsn $ADDRESS$DB_NAME --command '\namespaces list'); +echo $namespaces_4; +CompareNamespacesLists "${namespaces_4[@]}" "${namespaces_3[@]}" $server_pid; + +memstats_4=$(reindexer_tool --dsn $ADDRESS$DB_NAME --command 'select replication.data_hash, replication.data_count from #memstats'); +CompareMemstats "${memstats_4[@]}" "${memstats_3[@]}" $server_pid; + +KillAndRemoveServer $server_pid; +rm -rf $DB_PATH; diff --git a/cpp_src/cmd/reindexer_tool/CMakeLists.txt b/cpp_src/cmd/reindexer_tool/CMakeLists.txt index 18f713019..0d6bd7117 100644 --- a/cpp_src/cmd/reindexer_tool/CMakeLists.txt +++ b/cpp_src/cmd/reindexer_tool/CMakeLists.txt @@ -8,34 +8,34 @@ if (WITH_CPPTRACE) list(APPEND REINDEXER_LIBRARIES cpptrace ${REINDEXER_LIBRARIES}) endif() -if(MSVC) +if (MSVC) set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -DEBUG") endif() -if (NOT WITH_STDLIB_DEBUG) +if (NOT MSVC AND NOT WITH_STDLIB_DEBUG) find_library(ReplXX_LIBRARY NAMES ${ReplXX_NAMES} replxx) find_path(ReplXX_INCLUDE_DIR NAMES replxx.hxx HINTS /opt/local/include /usr/local/include /usr/include) if (NOT ReplXX_LIBRARY OR NOT ReplXX_INCLUDE_DIR) - # replxx not found. Download it - message (STATUS "ReplXX not found. Will download it") + # replxx not found. Download it + message(STATUS "ReplXX not found. 
Will download it") ExternalProject_Add( replxx_lib GIT_REPOSITORY "https://github.com/Restream/replxx" GIT_TAG "b50b7b7a8c2835b45607cffabc18e4742072e9e6" CMAKE_ARGS -DCMAKE_INSTALL_PREFIX=${CMAKE_CURRENT_BINARY_DIR} ) - include_directories (${CMAKE_CURRENT_BINARY_DIR}/include) + include_directories(${CMAKE_CURRENT_BINARY_DIR}/include) link_directories(${CMAKE_CURRENT_BINARY_DIR}/lib) list(APPEND REINDEXER_LIBRARIES replxx) add_definitions(-DREINDEX_WITH_REPLXX) - else () - message (STATUS "Found ReplXX: ${ReplXX_LIBRARY}") + else() + message(STATUS "Found ReplXX: ${ReplXX_LIBRARY}") include_directories(${ReplXX_INCLUDE_DIR}) list(APPEND REINDEXER_LIBRARIES ${ReplXX_LIBRARY}) add_definitions(-DREINDEX_WITH_REPLXX) - endif () -endif () + endif() +endif() file(GLOB_RECURSE SRCS *.h *.cc) add_executable(${TARGET} ${SRCS}) @@ -43,11 +43,11 @@ add_executable(${TARGET} ${SRCS}) # Enable export to provide readble stacktraces set_property(TARGET ${TARGET} PROPERTY ENABLE_EXPORTS 1) -if (NOT WITH_STDLIB_DEBUG) +if (NOT MSVC AND NOT WITH_STDLIB_DEBUG) if (NOT ReplXX_LIBRARY OR NOT ReplXX_INCLUDE_DIR) add_dependencies(${TARGET} replxx_lib) - endif () -endif () + endif() +endif() target_link_libraries(${TARGET} ${REINDEXER_LIBRARIES} ) @@ -57,7 +57,7 @@ install(TARGETS ${TARGET} ARCHIVE DESTINATION "${CMAKE_INSTALL_LIBDIR}" ) -if(MSVC) +if (MSVC) install(FILES $ DESTINATION ${CMAKE_INSTALL_BINDIR} OPTIONAL) endif() diff --git a/cpp_src/cmd/reindexer_tool/commandsexecutor.cc b/cpp_src/cmd/reindexer_tool/commandsexecutor.cc index 4ba0bc1a0..e5e4ff453 100644 --- a/cpp_src/cmd/reindexer_tool/commandsexecutor.cc +++ b/cpp_src/cmd/reindexer_tool/commandsexecutor.cc @@ -797,7 +797,7 @@ Error CommandsExecutor::commandNamespaces(const std::string& comman std::string_view subCommand = parser.NextToken(); if (iequals(subCommand, "add")) { - auto nsName = reindexer::unescapeString(parser.NextToken()); + parser.NextToken(); // nsName NamespaceDef def(""); Error err = def.FromJSON(reindexer::giftStr(parser.CurPtr())); @@ -966,9 +966,10 @@ Error CommandsExecutor::commandBench(const std::string& command) { std::atomic count(0), errCount(0); auto worker = std::bind(getBenchWorkerFn(count, errCount), deadline); - auto threads = std::unique_ptr(new std::thread[numThreads_]); - for (int i = 0; i < numThreads_; i++) threads[i] = std::thread(worker); - for (int i = 0; i < numThreads_; i++) threads[i].join(); + const auto numThreads = std::min(std::max(numThreads_, 1u), 65535u); + auto threads = std::unique_ptr(new std::thread[numThreads]); + for (unsigned i = 0; i < numThreads; i++) threads[i] = std::thread(worker); + for (unsigned i = 0; i < numThreads; i++) threads[i].join(); output_() << "Done. Got " << count / benchTime << " QPS, " << errCount << " errors" << std::endl; return err; diff --git a/cpp_src/cmd/reindexer_tool/commandsexecutor.h b/cpp_src/cmd/reindexer_tool/commandsexecutor.h index b31fdafa7..2a6ff0af9 100644 --- a/cpp_src/cmd/reindexer_tool/commandsexecutor.h +++ b/cpp_src/cmd/reindexer_tool/commandsexecutor.h @@ -40,7 +40,7 @@ class CommandsExecutor : public reindexer::IUpdatesObserver { }; template - CommandsExecutor(const std::string& outFileName, int numThreads, Args... args) + CommandsExecutor(const std::string& outFileName, unsigned numThreads, Args... 
args) : db_(std::move(args)...), output_(outFileName), numThreads_(numThreads) {} CommandsExecutor(const CommandsExecutor&) = delete; CommandsExecutor(CommandsExecutor&&) = delete; @@ -220,7 +220,7 @@ class CommandsExecutor : public reindexer::IUpdatesObserver { CancelContext cancelCtx_; DBInterface db_; Output output_; - int numThreads_; + unsigned numThreads_ = 1; std::unordered_map variables_; httpparser::UrlParser uri_; reindexer::net::ev::async cmdAsync_; diff --git a/cpp_src/cmd/reindexer_tool/commandsprocessor.h b/cpp_src/cmd/reindexer_tool/commandsprocessor.h index 1a87812f0..85ed9df2d 100644 --- a/cpp_src/cmd/reindexer_tool/commandsprocessor.h +++ b/cpp_src/cmd/reindexer_tool/commandsprocessor.h @@ -17,7 +17,7 @@ template class CommandsProcessor { public: template - CommandsProcessor(const std::string& outFileName, const std::string& inFileName, int numThreads, Args... args) + CommandsProcessor(const std::string& outFileName, const std::string& inFileName, unsigned numThreads, Args... args) : inFileName_(inFileName), executor_(outFileName, numThreads, std::move(args)...) {} CommandsProcessor(const CommandsProcessor&) = delete; CommandsProcessor(CommandsProcessor&&) = delete; diff --git a/cpp_src/cmd/reindexer_tool/reindexer_tool.cc b/cpp_src/cmd/reindexer_tool/reindexer_tool.cc index 895172d13..2e2368a85 100644 --- a/cpp_src/cmd/reindexer_tool/reindexer_tool.cc +++ b/cpp_src/cmd/reindexer_tool/reindexer_tool.cc @@ -72,8 +72,8 @@ int main(int argc, char* argv[]) { args::ValueFlag outFileName(progOptions, "FILENAME", "send query results to file", {'o', "output"}, "", Options::Single | Options::Global); - args::ValueFlag connThreads(progOptions, "INT", "Number of threads(connections) used by db connector", {'t', "threads"}, 1, - Options::Single | Options::Global); + args::ValueFlag connThreads(progOptions, "INT=1..65535", "Number of threads(connections) used by db connector", + {'t', "threads"}, 1, Options::Single | Options::Global); args::Flag createDBF(progOptions, "", "Enable created database if missed", {"createdb"}); @@ -163,7 +163,6 @@ int main(int argc, char* argv[]) { err = commandsProcessor.Connect(dsn, reindexer::client::ConnectOpts().CreateDBIfMissing(createDBF && args::get(createDBF))); if (err.ok()) ok = commandsProcessor.Run(args::get(command)); } else if (checkIfStartsWithCS("builtin://"sv, dsn)) { - reindexer::Reindexer db; CommandsProcessor commandsProcessor(args::get(outFileName), args::get(fileName), args::get(connThreads)); err = commandsProcessor.Connect(dsn, ConnectOpts().DisableReplication()); if (err.ok()) ok = commandsProcessor.Run(args::get(command)); diff --git a/cpp_src/core/activity_context.cc b/cpp_src/core/activity_context.cc index 45c014795..af2c7d52c 100644 --- a/cpp_src/core/activity_context.cc +++ b/cpp_src/core/activity_context.cc @@ -106,10 +106,6 @@ RdxActivityContext::RdxActivityContext(std::string_view activityTracer, std::str ipConnectionId, Activity::InProgress, system_clock_w::now(), ""sv}, state_(serializeState(clientState ? 
Activity::Sending : Activity::InProgress)), parent_(&parent) -#ifndef NDEBUG - , - refCount_(0u) -#endif { parent_->Register(this); } @@ -119,10 +115,6 @@ RdxActivityContext::RdxActivityContext(RdxActivityContext&& other) : data_(other.data_), state_(other.state_.load(std::memory_order_relaxed)), parent_(other.parent_) -#ifndef NDEBUG - , - refCount_(0u) -#endif { if (parent_) parent_->Reregister(&other, this); other.parent_ = nullptr; diff --git a/cpp_src/core/cjson/baseencoder.cc b/cpp_src/core/cjson/baseencoder.cc index bde4cb007..6cf522e6f 100644 --- a/cpp_src/core/cjson/baseencoder.cc +++ b/cpp_src/core/cjson/baseencoder.cc @@ -150,24 +150,24 @@ bool BaseEncoder::encode(ConstPayload* pl, Serializer& rdser, Builder& throw Error(errParams, "Non-array field '%s' [%d] from '%s' can only be encoded once.", f.Name(), tagField, pl->Type().Name()); } assertrx(tagField < pl->NumFields()); - int* cnt = &fieldsoutcnt_[tagField]; + int& cnt = fieldsoutcnt_[tagField]; switch (tagType) { case TAG_ARRAY: { const auto count = rdser.GetVarUint(); if (visible) { pl->Type().Field(tagField).Type().EvaluateOneOf( - [&](KeyValueType::Bool) { builder.Array(tagName, pl->GetArray(tagField).subspan(*cnt, count), *cnt); }, - [&](KeyValueType::Int) { builder.Array(tagName, pl->GetArray(tagField).subspan(*cnt, count), *cnt); }, - [&](KeyValueType::Int64) { builder.Array(tagName, pl->GetArray(tagField).subspan(*cnt, count), *cnt); }, - [&](KeyValueType::Double) { builder.Array(tagName, pl->GetArray(tagField).subspan(*cnt, count), *cnt); }, - [&](KeyValueType::String) { builder.Array(tagName, pl->GetArray(tagField).subspan(*cnt, count), *cnt); }, - [&](KeyValueType::Uuid) { builder.Array(tagName, pl->GetArray(tagField).subspan(*cnt, count), *cnt); }, + [&](KeyValueType::Bool) { builder.Array(tagName, pl->GetArray(tagField).subspan(cnt, count), cnt); }, + [&](KeyValueType::Int) { builder.Array(tagName, pl->GetArray(tagField).subspan(cnt, count), cnt); }, + [&](KeyValueType::Int64) { builder.Array(tagName, pl->GetArray(tagField).subspan(cnt, count), cnt); }, + [&](KeyValueType::Double) { builder.Array(tagName, pl->GetArray(tagField).subspan(cnt, count), cnt); }, + [&](KeyValueType::String) { builder.Array(tagName, pl->GetArray(tagField).subspan(cnt, count), cnt); }, + [&](KeyValueType::Uuid) { builder.Array(tagName, pl->GetArray(tagField).subspan(cnt, count), cnt); }, [](OneOf) noexcept { assertrx(0); abort(); }); } - (*cnt) += count; + cnt += count; break; } case TAG_NULL: @@ -182,8 +182,8 @@ bool BaseEncoder::encode(ConstPayload* pl, Serializer& rdser, Builder& case TAG_OBJECT: case TAG_UUID: objectScalarIndexes_.set(tagField); - if (visible) builder.Put(tagName, pl->Get(tagField, (*cnt)), *cnt); - ++(*cnt); + if (visible) builder.Put(tagName, pl->Get(tagField, cnt), cnt); + ++cnt; break; } } else { diff --git a/cpp_src/core/cjson/baseencoder.h b/cpp_src/core/cjson/baseencoder.h index 4b594986f..1ad04b51c 100644 --- a/cpp_src/core/cjson/baseencoder.h +++ b/cpp_src/core/cjson/baseencoder.h @@ -23,7 +23,7 @@ class IEncoderDatasourceWithJoins { virtual size_t GetJoinedRowsCount() const noexcept = 0; virtual size_t GetJoinedRowItemsCount(size_t rowId) const = 0; virtual ConstPayload GetJoinedItemPayload(size_t rowid, size_t plIndex) const = 0; - virtual const std::string &GetJoinedItemNamespace(size_t rowid) noexcept = 0; + virtual const std::string &GetJoinedItemNamespace(size_t rowid) const noexcept = 0; virtual const TagsMatcher &GetJoinedItemTagsMatcher(size_t rowid) noexcept = 0; virtual const FieldsSet 
&GetJoinedItemFieldsFilter(size_t rowid) noexcept = 0; }; @@ -62,7 +62,7 @@ class BaseEncoder { std::string_view getPlTuple(ConstPayload &pl); const TagsMatcher *tagsMatcher_; - int fieldsoutcnt_[kMaxIndexes]; + std::array fieldsoutcnt_{0}; const FieldsSet *filter_; WrSerializer tmpPlTuple_; TagsPath curTagsPath_; diff --git a/cpp_src/core/cjson/cjsonmodifier.cc b/cpp_src/core/cjson/cjsonmodifier.cc index 66a7c2545..72e970bd6 100644 --- a/cpp_src/core/cjson/cjsonmodifier.cc +++ b/cpp_src/core/cjson/cjsonmodifier.cc @@ -26,16 +26,16 @@ class CJsonModifier::Context { } } - std::fill(std::begin(fieldsArrayOffsets), std::end(fieldsArrayOffsets), 0); + std::fill(fieldsArrayOffsets.begin(), fieldsArrayOffsets.end(), 0); } - bool IsForAllItems() const noexcept { return isForAllItems_; } + [[nodiscard]] bool IsForAllItems() const noexcept { return isForAllItems_; } const VariantArray &value; WrSerializer &wrser; Serializer rdser; TagsPath jsonPath; IndexedTagsPath currObjPath; - FieldModifyMode mode; + FieldModifyMode mode = FieldModeSet; bool fieldUpdated = false; bool updateArrayElements = false; const Payload *payload = nullptr; @@ -45,119 +45,112 @@ class CJsonModifier::Context { bool isForAllItems_ = false; }; -void CJsonModifier::SetFieldValue(std::string_view tuple, IndexedTagsPath fieldPath, const VariantArray &val, WrSerializer &ser, +void CJsonModifier::SetFieldValue(std::string_view tuple, const IndexedTagsPath &fieldPath, const VariantArray &val, WrSerializer &ser, const Payload &pl) { - if (fieldPath.empty()) { - throw Error(errLogic, kWrongFieldsAmountMsg); - } - tagsPath_.clear(); - Context ctx(fieldPath, val, ser, tuple, FieldModeSet, &pl); - fieldPath_ = std::move(fieldPath); + auto ctx = initState(tuple, fieldPath, val, ser, &pl, FieldModifyMode::FieldModeSet); updateFieldInTuple(ctx); if (!ctx.fieldUpdated && !ctx.IsForAllItems()) { throw Error(errParams, "[SetFieldValue] Requested field or array's index was not found"); } } -void CJsonModifier::SetObject(std::string_view tuple, IndexedTagsPath fieldPath, const VariantArray &val, WrSerializer &ser, +void CJsonModifier::SetObject(std::string_view tuple, const IndexedTagsPath &fieldPath, const VariantArray &val, WrSerializer &ser, const Payload &pl) { - if (fieldPath.empty()) { - throw Error(errLogic, kWrongFieldsAmountMsg); - } - tagsPath_.clear(); - Context ctx(fieldPath, val, ser, tuple, FieldModeSetJson, &pl); - fieldPath_ = std::move(fieldPath); + auto ctx = initState(tuple, fieldPath, val, ser, &pl, FieldModifyMode::FieldModeSetJson); buildCJSON(ctx); if (!ctx.fieldUpdated && !ctx.IsForAllItems()) { throw Error(errParams, "[SetObject] Requested field or array's index was not found"); } } -void CJsonModifier::RemoveField(std::string_view tuple, IndexedTagsPath fieldPath, WrSerializer &wrser) { +void CJsonModifier::RemoveField(std::string_view tuple, const IndexedTagsPath &fieldPath, WrSerializer &wrser) { + auto ctx = initState(tuple, fieldPath, {}, wrser, nullptr, FieldModeDrop); + dropFieldInTuple(ctx); +} + +CJsonModifier::Context CJsonModifier::initState(std::string_view tuple, const IndexedTagsPath &fieldPath, const VariantArray &val, + WrSerializer &ser, const Payload *pl, FieldModifyMode mode) { if (fieldPath.empty()) { throw Error(errLogic, kWrongFieldsAmountMsg); } tagsPath_.clear(); - Context ctx(fieldPath, {}, wrser, tuple, FieldModeDrop); - fieldPath_ = std::move(fieldPath); - dropFieldInTuple(ctx); + Context ctx(fieldPath, val, ser, tuple, mode, pl); + fieldPath_ = fieldPath; + + return ctx; } -void 
CJsonModifier::updateObject(Context &ctx, int tagName) { +void CJsonModifier::updateObject(Context &ctx, int tagName) const { + ctx.fieldUpdated = true; JsonDecoder jsonDecoder(tagsMatcher_); if (ctx.value.IsArrayValue()) { CJsonBuilder cjsonBuilder(ctx.wrser, ObjType::TypeArray, &tagsMatcher_, tagName); - for (size_t i = 0; i < ctx.value.size(); ++i) { + for (const auto &item : ctx.value) { auto objBuilder = cjsonBuilder.Object(nullptr); - jsonDecoder.Decode(std::string_view(ctx.value[i]), objBuilder, ctx.jsonPath); + jsonDecoder.Decode(std::string_view(item), objBuilder, ctx.jsonPath); } - } else { - assertrx(ctx.value.size() == 1); - CJsonBuilder cjsonBuilder(ctx.wrser, ObjType::TypeObject, &tagsMatcher_, tagName); - jsonDecoder.Decode(std::string_view(ctx.value.front()), cjsonBuilder, ctx.jsonPath); + return; } - ctx.fieldUpdated = true; -} -void CJsonModifier::updateField(Context &ctx, size_t idx) { - assertrx(idx < ctx.value.size()); - copyCJsonValue(kvType2Tag(ctx.value[idx].Type()), ctx.value[idx], ctx.wrser); + assertrx(ctx.value.size() == 1); + CJsonBuilder cjsonBuilder(ctx.wrser, ObjType::TypeObject, &tagsMatcher_, tagName); + jsonDecoder.Decode(std::string_view(ctx.value.front()), cjsonBuilder, ctx.jsonPath); } -void CJsonModifier::insertField(Context &ctx) { +void CJsonModifier::insertField(Context &ctx) const { ctx.fieldUpdated = true; assertrx(ctx.currObjPath.size() < fieldPath_.size()); int nestedObjects = 0; for (size_t i = ctx.currObjPath.size(); i < fieldPath_.size(); ++i) { - int tagName = fieldPath_[i].NameTag(); + const int tagName = fieldPath_[i].NameTag(); const bool finalTag = (i == fieldPath_.size() - 1); if (finalTag) { if (ctx.mode == FieldModeSetJson) { updateObject(ctx, tagName); - } else { - const int field = tagsMatcher_.tags2field(ctx.jsonPath.data(), fieldPath_.size()); - const TagType tagType = determineUpdateTagType(ctx, field); - if (field > 0) { - putCJsonRef(tagType, tagName, field, ctx.value, ctx.wrser); - } else { - putCJsonValue(tagType, tagName, ctx.value, ctx.wrser); - } + continue; } - } else { - ctx.wrser.PutCTag(ctag{TAG_OBJECT, tagName}); - ++nestedObjects; + + const int field = tagsMatcher_.tags2field(ctx.jsonPath.data(), fieldPath_.size()); + const TagType tagType = determineUpdateTagType(ctx, field); + isIndexed(field) ? 
putCJsonRef(tagType, tagName, field, ctx.value, ctx.wrser) + : putCJsonValue(tagType, tagName, ctx.value, ctx.wrser); + continue; } + + ctx.wrser.PutCTag(ctag{TAG_OBJECT, tagName}); + ++nestedObjects; } - while (nestedObjects-- > 0) ctx.wrser.PutCTag(kCTagEnd); + while (nestedObjects-- > 0) { + ctx.wrser.PutCTag(kCTagEnd); + } ctx.currObjPath.clear(); } -bool CJsonModifier::needToInsertField(const Context &ctx) { - if (ctx.fieldUpdated) return false; - if (fieldPath_.back().IsArrayNode()) return false; - if (ctx.currObjPath.size() < fieldPath_.size()) { - for (unsigned i = 0; i < ctx.currObjPath.size(); ++i) { - if (fieldPath_[i] != ctx.currObjPath[i]) { - return false; - } - } - if (ctx.IsForAllItems()) { - throw Error(errParams, "Unable to insert new field with 'all items ([*])' syntax"); +bool CJsonModifier::needToInsertField(const Context &ctx) const { + assertrx_throw(!fieldPath_.empty()); + if (ctx.fieldUpdated || fieldPath_.back().IsArrayNode()) return false; + if (ctx.currObjPath.size() >= fieldPath_.size()) return false; + assertrx_throw(ctx.currObjPath.size() <= fieldPath_.size()); + for (unsigned i = 0; i < ctx.currObjPath.size(); ++i) { + if (fieldPath_[i] != ctx.currObjPath[i]) { + return false; } - for (unsigned i = ctx.currObjPath.size(); i < fieldPath_.size(); ++i) { - if (fieldPath_[i].IsArrayNode()) { - return false; - } + } + if (ctx.IsForAllItems()) { + throw Error(errParams, "Unable to insert new field with 'all items ([*])' syntax"); + } + for (unsigned i = ctx.currObjPath.size(); i < fieldPath_.size(); ++i) { + if (fieldPath_[i].IsArrayNode()) { + return false; } - return true; } - return false; + return true; } -TagType CJsonModifier::determineUpdateTagType(const Context &ctx, int field) { - if (field >= 0) { +TagType CJsonModifier::determineUpdateTagType(const Context &ctx, int field) const { + if (isIndexed(field)) { const PayloadFieldType &fieldType = pt_.Field(field); if (!fieldType.IsArray() || ctx.updateArrayElements || !ctx.value.IsNullValue()) { for (auto &v : ctx.value) { @@ -167,14 +160,6 @@ TagType CJsonModifier::determineUpdateTagType(const Context &ctx, int field) { } } } - } else if (ctx.value.size() > 1) { - const auto type = kvType2Tag(ctx.value.front().Type()); - for (auto it = ctx.value.begin() + 1, end = ctx.value.end(); it != end; ++it) { - if (type != kvType2Tag(it->Type())) { - throw Error(errParams, "Unable to update field with heterogeneous array. 
Type[0] is [%s] and type[%d] is [%s]", - TagTypeToStr(type), it - ctx.value.begin(), TagTypeToStr(kvType2Tag(it->Type()))); - } - } } if (ctx.updateArrayElements || ctx.value.IsArrayValue()) { @@ -182,146 +167,252 @@ TagType CJsonModifier::determineUpdateTagType(const Context &ctx, int field) { } else if (ctx.value.IsNullValue() || ctx.value.empty()) { return TAG_NULL; } - return kvType2Tag(ctx.value.front().Type()); + return arrayKvType2Tag(ctx.value); } -bool CJsonModifier::checkIfFoundTag(Context &ctx, bool isLastItem) { - if (tagsPath_.empty()) return false; - bool result = fieldPath_.Compare(tagsPath_); - if (result) { - if (fieldPath_.back().IsArrayNode()) { - if (fieldPath_.back().IsForAllItems()) { - if (isLastItem) ctx.fieldUpdated = true; - } else { - ctx.fieldUpdated = true; +bool CJsonModifier::checkIfFoundTag(Context &ctx, bool isLastItem) const { + if (tagsPath_.empty() || !fieldPath_.Compare(tagsPath_)) return false; + + const auto &backFieldPath = fieldPath_.back(); + if (!backFieldPath.IsArrayNode() || ((!backFieldPath.IsForAllItems() || isLastItem))) { + ctx.fieldUpdated = true; + } + + return true; +} + +void CJsonModifier::setArray(Context &ctx) const { + auto type = arrayKvType2Tag(ctx.value); + ctx.wrser.PutCArrayTag(carraytag{ctx.value.size(), type}); + const bool isObjsArr = (type == TAG_OBJECT); + for (const auto &item : ctx.value) { + if (isObjsArr) { + type = kvType2Tag(item.Type()); + ctx.wrser.PutCTag(ctag{type}); + } + copyCJsonValue(type, item, ctx.wrser); + } +} + +void CJsonModifier::writeCTag(const ctag &tag, Context &ctx) { + bool tagMatched = checkIfFoundTag(ctx); + const TagType tagType = tag.Type(); + const int field = tag.Field(); + const int tagName = tag.Name(); + if (tagType == TAG_ARRAY) { + const auto count = ctx.rdser.GetVarUint(); + if (!tagMatched || !ctx.fieldUpdated) { + auto &lastTag = tagsPath_.back(); + for (uint64_t i = 0; i < count; ++i) { + lastTag.SetIndex(i); + const bool isLastItem = (i + 1 == count); + tagMatched = checkIfFoundTag(ctx, isLastItem); + if (tagMatched && ctx.fieldUpdated) { + break; + } } - } else { + } + + if (tagMatched && ctx.fieldUpdated) { + const auto resultTagType = determineUpdateTagType(ctx, field); + ctx.wrser.PutCTag(ctag{resultTagType, tagName, field}); + if (resultTagType == TAG_ARRAY) { + ctx.wrser.PutVarUint(ctx.updateArrayElements ? 
count : ctx.value.size());
+			}
+			return;
+		}
+
+		ctx.wrser.PutCTag(ctag{tagType, tagName, field});
+		ctx.wrser.PutVarUint(count);
+		return;
+	}
+
+	if (!tagMatched) {
+		ctx.wrser.PutCTag(ctag{tagType, tagName, field});
+		return;
+	}
+
+	if (ctx.updateArrayElements) {
+		throw Error(errParams, "Unable to update scalar value by index");
+	}
+	const auto resultTagType = determineUpdateTagType(ctx, field);
+	ctx.wrser.PutCTag(ctag{resultTagType, tagName, field});
+	if (resultTagType == TAG_ARRAY) {
+		ctx.wrser.PutVarUint(ctx.value.size());
+	}
+}
+
+void CJsonModifier::updateArray(TagType atagType, uint32_t count, int tagName, Context &ctx) {
+	assertrx_throw(!ctx.value.IsArrayValue());	// Unable to update array's element with array-value
+
+	Variant value;
+	if (!ctx.value.empty()) {
+		value = ctx.value.front();
+	}
+
+	// a situation is possible where the array was homogeneous and a new element of a different type is added;
+	// in this case the array must change its type and become heterogeneous
+	const auto valueType = kvType2Tag(value.Type());
+	assertrx((atagType != valueType) || (atagType != TAG_OBJECT));
+
+	ctx.wrser.PutCArrayTag(carraytag{count, TAG_OBJECT});
+
+	for (uint32_t i = 0; i < count; i++) {
+		tagsPath_.back().SetIndex(i);
+		const bool isLastItem = (i + 1 == count);
+		if (checkIfFoundTag(ctx, isLastItem)) {
+			(atagType == TAG_OBJECT) ? skipCjsonTag(ctag{ctx.rdser.GetCTag().Type()}, ctx.rdser, &ctx.fieldsArrayOffsets)
+									 : skipCjsonTag(ctag{atagType}, ctx.rdser, &ctx.fieldsArrayOffsets);
+			ctx.wrser.PutCTag(ctag{valueType});
+			copyCJsonValue(valueType, value, ctx.wrser);
+			ctx.fieldUpdated = true;
+			continue;	// next item
+		}
+
+		switch (atagType) {
+			case TAG_OBJECT: {
+				TagsPathScope pathScopeObj(ctx.currObjPath, tagName, i);
+				updateFieldInTuple(ctx);
+				break;
+			}
+			case TAG_VARINT:
+			case TAG_DOUBLE:
+			case TAG_STRING:
+			case TAG_ARRAY:
+			case TAG_NULL:
+			case TAG_BOOL:
+			case TAG_END:
+			case TAG_UUID:
+				// the array tag type was updated (items need to be stored as objects)
+				ctx.wrser.PutCTag(ctag{atagType});
+				copyCJsonValue(atagType, ctx.rdser, ctx.wrser);
+				break;
+		}
+	}
+
+	assertrx_throw(ctx.fieldUpdated);
+}
+
+void CJsonModifier::copyArray(int tagName, Context &ctx) {
+	const carraytag atag = ctx.rdser.GetCArrayTag();
+	const TagType atagType = atag.Type();
+	const auto count = atag.Count();
+
+	// remember the current positions in the serializers
+	const auto rdserPos = ctx.rdser.Pos();
+	const auto wrserLen = ctx.wrser.Len();
+
+	ctx.wrser.PutCArrayTag(atag);
+
+	for (uint32_t i = 0; i < count; i++) {
+		tagsPath_.back().SetIndex(i);
+		const bool isLastItem = (i + 1 == count);
+		// update item
+		if (checkIfFoundTag(ctx, isLastItem)) {
+			if (ctx.value.IsArrayValue()) {
+				throw Error(errParams, "Unable to update array's element with array-value");
+			}
+			Variant value;
+			if (!ctx.value.empty()) {
+				value = ctx.value.front();
+			}
+			// a situation is possible where the array was homogeneous and a new element of a different type is added
+			const auto valueType = kvType2Tag(value.Type());
+			if ((atagType != valueType) && (atagType != TAG_OBJECT)) {
+				// go back to the beginning of the array and rewrite it as an array of objects
+				ctx.rdser.SetPos(rdserPos);
+				ctx.wrser.Reset(wrserLen);
+				updateArray(atagType, count, tagName, ctx);
+				return;	// array updated - stop processing
+			}
+
+			// the array type is unchanged - simply rewrite the item
+			auto vtagType = atagType;
+			if (atagType == TAG_OBJECT) {
+				vtagType = ctx.rdser.GetCTag().Type();
+				ctx.wrser.PutCTag(ctag{valueType});
+			}
+			skipCjsonTag(ctag{vtagType}, ctx.rdser, &ctx.fieldsArrayOffsets);
copyCJsonValue(valueType, value, ctx.wrser); + + ctx.fieldUpdated = true; + continue; // next item + } + + // copy item as is + switch (atagType) { + case TAG_OBJECT: { + TagsPathScope pathScopeObj(ctx.currObjPath, tagName, i); + updateFieldInTuple(ctx); + break; + } + case TAG_VARINT: + case TAG_DOUBLE: + case TAG_STRING: + case TAG_ARRAY: + case TAG_NULL: + case TAG_BOOL: + case TAG_END: + case TAG_UUID: + copyCJsonValue(atagType, ctx.rdser, ctx.wrser); + break; } } - return result; } bool CJsonModifier::updateFieldInTuple(Context &ctx) { const ctag tag = ctx.rdser.GetCTag(); - - TagType tagType = tag.Type(); + const TagType tagType = tag.Type(); if (tagType == TAG_END) { - if (needToInsertField(ctx)) insertField(ctx); + if (needToInsertField(ctx)) { + insertField(ctx); + } ctx.wrser.PutCTag(kCTagEnd); return false; } - const int field = tag.Field(); + const int tagName = tag.Name(); TagsPathScope pathScope(tagsPath_, tagName); - bool tagMatched = checkIfFoundTag(ctx); - if (field >= 0) { - if (tagType == TAG_ARRAY) { - const int count = ctx.rdser.GetVarUint(); - if (!tagMatched || !ctx.fieldUpdated) { - auto &lastTag = tagsPath_.back(); - for (int i = 0; i < count; ++i) { - lastTag.SetIndex(i); - const bool isLastItem = (i + 1 == count); - tagMatched = checkIfFoundTag(ctx, isLastItem); - if (tagMatched && ctx.fieldUpdated) { - break; - } - } - } + const int field = tag.Field(); + if (isIndexed(field)) { + writeCTag(tag, ctx); + return true; + } - if (tagMatched && ctx.fieldUpdated) { - const auto resultTagType = determineUpdateTagType(ctx, field); - ctx.wrser.PutCTag(ctag{resultTagType, tagName, field}); + const bool tagMatched = checkIfFoundTag(ctx); + const auto resultTagType = tagMatched ? determineUpdateTagType(ctx, field) : tagType; + ctx.wrser.PutCTag(ctag{resultTagType, tagName, field}); - if (resultTagType == TAG_ARRAY) { - if (ctx.updateArrayElements) { - ctx.wrser.PutVarUint(count); - } else { - ctx.wrser.PutVarUint(ctx.value.size()); - } - } - } else { - ctx.wrser.PutCTag(ctag{tagType, tagName, field}); - ctx.wrser.PutVarUint(count); - } - } else { - if (tagMatched) { - if (ctx.updateArrayElements) { - throw Error(errParams, "Unable to update scalar value by index"); - } - const auto resultTagType = determineUpdateTagType(ctx, field); - ctx.wrser.PutCTag(ctag{resultTagType, tagName, field}); - - if (resultTagType == TAG_ARRAY) { - ctx.wrser.PutVarUint(ctx.value.size()); - } + if (tagMatched) { + if (ctx.updateArrayElements && tagType != TAG_ARRAY) { + throw Error(errParams, "Unable to update scalar value by index"); + } + if (resultTagType != TAG_NULL) { + if (resultTagType == TAG_ARRAY) { + setArray(ctx); + } else if (ctx.value.empty()) { + throw Error(errLogic, "Update value for field [%s] cannot be empty", tagsMatcher_.tag2name(tagName)); + } else if (ctx.value.size() == 1) { + const auto item = ctx.value.front(); + copyCJsonValue(kvType2Tag(item.Type()), item, ctx.wrser); } else { - ctx.wrser.PutCTag(ctag{tagType, tagName, field}); + throw Error(errParams, "Unexpected value to update"); } } - } else { - const auto resultTagType = tagMatched ? 
determineUpdateTagType(ctx, field) : tagType; - ctx.wrser.PutCTag(ctag{resultTagType, tagName, field}); - if (tagMatched) { - if (ctx.updateArrayElements && tagType != TAG_ARRAY) { - throw Error(errParams, "Unable to update scalar value by index"); - } - if (resultTagType != TAG_NULL) { - if (resultTagType == TAG_ARRAY) { - ctx.wrser.PutCArrayTag(carraytag{ctx.value.size(), kvType2Tag(ctx.value.ArrayType())}); - } else if (ctx.value.empty()) { - throw Error(errLogic, "Update value for field [%s] cannot be empty", tagsMatcher_.tag2name(tagName)); - } - for (size_t i = 0, size = ctx.value.size(); i < size; ++i) { - updateField(ctx, i); - } - } - skipCjsonTag(tag, ctx.rdser, &ctx.fieldsArrayOffsets); - } else if (tagType == TAG_OBJECT) { - TagsPathScope pathScope(ctx.currObjPath, tagName); - while (updateFieldInTuple(ctx)) { - } - } else if (tagType == TAG_ARRAY) { - const carraytag atag = ctx.rdser.GetCArrayTag(); - ctx.wrser.PutCArrayTag(atag); - const TagType atagType = atag.Type(); - const auto count = atag.Count(); - for (unsigned i = 0; i < count; i++) { - tagsPath_.back().SetIndex(i); - const bool isLastItem = (i + 1 == atag.Count()); - if (checkIfFoundTag(ctx, isLastItem)) { - if (ctx.value.IsArrayValue()) { - throw Error(errParams, "Unable to update non-indexed array's element with array-value"); - } - copyCJsonValue(atagType, ctx.value.front(), ctx.wrser); - skipCjsonTag(ctag{atagType}, ctx.rdser, &ctx.fieldsArrayOffsets); - } else { - switch (atagType) { - case TAG_OBJECT: { - TagsPathScope pathScope(ctx.currObjPath, tagName, i); - updateFieldInTuple(ctx); - break; - } - case TAG_VARINT: - case TAG_DOUBLE: - case TAG_STRING: - case TAG_ARRAY: - case TAG_NULL: - case TAG_BOOL: - case TAG_END: - case TAG_UUID: - copyCJsonValue(atagType, ctx.rdser, ctx.wrser); - break; - } - } - } - } else { - copyCJsonValue(tagType, ctx.rdser, ctx.wrser); + skipCjsonTag(tag, ctx.rdser, &ctx.fieldsArrayOffsets); + return true; + } + + if (tagType == TAG_OBJECT) { + TagsPathScope pathScopeObj(ctx.currObjPath, tagName); + while (updateFieldInTuple(ctx)) { } + return true; } + (tagType == TAG_ARRAY) ? copyArray(tagName, ctx) : copyCJsonValue(tagType, ctx.rdser, ctx.wrser); return true; } @@ -333,7 +424,7 @@ bool CJsonModifier::dropFieldInTuple(Context &ctx) { return false; } - int tagName = tag.Name(); + const int tagName = tag.Name(); TagsPathScope pathScope(tagsPath_, tagName); bool tagMatched = (!ctx.fieldUpdated && fieldPath_.Compare(tagsPath_)); @@ -343,73 +434,80 @@ bool CJsonModifier::dropFieldInTuple(Context &ctx) { return true; } - int field = tag.Field(); + const int field = tag.Field(); ctx.wrser.PutCTag(ctag{tagType, tagName, field}); - if (field >= 0) { + if (isIndexed(field)) { if (tagType == TAG_ARRAY) { const auto count = ctx.rdser.GetVarUint(); ctx.wrser.PutVarUint(count); } - } else { - if (tagType == TAG_OBJECT) { - TagsPathScope pathScope(ctx.currObjPath, tagName); - while (dropFieldInTuple(ctx)) { - } - } else if (tagType == TAG_ARRAY) { - carraytag atag = ctx.rdser.GetCArrayTag(); - const TagType atagType = atag.Type(); - const int size = atag.Count(); - tagMatched = (fieldPath_.back().IsArrayNode() && tagsPath_ == fieldPath_); - if (tagMatched) { - atag = carraytag(fieldPath_.back().IsForAllItems() ? 
0 : size - 1, atagType); - ctx.fieldUpdated = true; + return true; + } + + if (tagType == TAG_OBJECT) { + TagsPathScope pathScopeObj(ctx.currObjPath, tagName); + while (dropFieldInTuple(ctx)) { + } + return true; + } + + if (tagType == TAG_ARRAY) { + carraytag atag = ctx.rdser.GetCArrayTag(); + const TagType atagType = atag.Type(); + const auto size = int(atag.Count()); + tagMatched = (fieldPath_.back().IsArrayNode() && tagsPath_ == fieldPath_); + if (tagMatched) { + atag = carraytag(fieldPath_.back().IsForAllItems() ? 0 : size - 1, atagType); + ctx.fieldUpdated = true; + } + + ctx.wrser.PutCArrayTag(atag); + for (int i = 0; i < size; ++i) { + tagsPath_.back().SetIndex(i); + if (tagMatched && (i == fieldPath_.back().Index() || fieldPath_.back().IsForAllItems())) { + skipCjsonTag(ctag{atagType}, ctx.rdser, &ctx.fieldsArrayOffsets); + continue; } - ctx.wrser.PutCArrayTag(atag); - for (int i = 0; i < size; i++) { - tagsPath_.back().SetIndex(i); - if (tagMatched && (i == fieldPath_.back().Index() || fieldPath_.back().IsForAllItems())) { - skipCjsonTag(ctag{atagType}, ctx.rdser, &ctx.fieldsArrayOffsets); - } else { - switch (atagType) { - case TAG_OBJECT: { - TagsPathScope pathScope(ctx.currObjPath, tagName, i); - dropFieldInTuple(ctx); - break; - } - case TAG_VARINT: - case TAG_STRING: - case TAG_DOUBLE: - case TAG_BOOL: - case TAG_ARRAY: - case TAG_NULL: - case TAG_END: - case TAG_UUID: - copyCJsonValue(atagType, ctx.rdser, ctx.wrser); - break; - } + + switch (atagType) { + case TAG_OBJECT: { + TagsPathScope pathScopeObj(ctx.currObjPath, tagName, i); + dropFieldInTuple(ctx); + break; } + case TAG_VARINT: + case TAG_STRING: + case TAG_DOUBLE: + case TAG_BOOL: + case TAG_ARRAY: + case TAG_NULL: + case TAG_END: + case TAG_UUID: + copyCJsonValue(atagType, ctx.rdser, ctx.wrser); + break; } - } else { - copyCJsonValue(tagType, ctx.rdser, ctx.wrser); } + return true; } + copyCJsonValue(tagType, ctx.rdser, ctx.wrser); return true; } -void CJsonModifier::embedFieldValue(TagType type, int field, Context &ctx, size_t idx) { - if (field < 0) { - copyCJsonValue(type, ctx.rdser, ctx.wrser); - } else { +void CJsonModifier::embedFieldValue(TagType type, int field, Context &ctx, size_t idx) const { + if (isIndexed(field)) { assertrx(ctx.payload); - Variant v = ctx.payload->Get(field, ctx.fieldsArrayOffsets[field] + idx); + const Variant v = ctx.payload->Get(field, ctx.fieldsArrayOffsets[field] + idx); copyCJsonValue(type, v, ctx.wrser); + return; } + + copyCJsonValue(type, ctx.rdser, ctx.wrser); } bool CJsonModifier::buildCJSON(Context &ctx) { - const ctag tag = ctx.rdser.GetCTag(); + const auto tag = ctx.rdser.GetCTag(); TagType tagType = tag.Type(); if (tagType == TAG_END) { if (needToInsertField(ctx)) insertField(ctx); @@ -421,26 +519,29 @@ bool CJsonModifier::buildCJSON(Context &ctx) { TagsPathScope pathScope(tagsPath_, tagName); const auto field = tag.Field(); - bool embeddedField = (field < 0); bool tagMatched = fieldPath_.Compare(tagsPath_); - if (!tagMatched) { - ctx.wrser.PutCTag(ctag{tagType, tagName}); - } else { + if (tagMatched) { tagType = TAG_OBJECT; + } else { + ctx.wrser.PutCTag(ctag{tagType, tagName}); } if (tagType == TAG_OBJECT) { if (tagMatched) { skipCjsonTag(tag, ctx.rdser, &ctx.fieldsArrayOffsets); updateObject(ctx, tagName); - } else { - TagsPathScope pathScope(ctx.currObjPath, tagName); - while (buildCJSON(ctx)) { - } + return true; } - } else if (tagType == TAG_ARRAY) { - const carraytag atag{embeddedField ? 
ctx.rdser.GetCArrayTag() - : carraytag(ctx.rdser.GetVarUint(), kvType2Tag(pt_.Field(tag.Field()).Type()))}; + + TagsPathScope pathScopeObj(ctx.currObjPath, tagName); + while (buildCJSON(ctx)) { + } + return true; + } + + if (tagType == TAG_ARRAY) { + const carraytag atag{isIndexed(field) ? carraytag(ctx.rdser.GetVarUint(), kvType2Tag(pt_.Field(tag.Field()).Type())) + : ctx.rdser.GetCArrayTag()}; ctx.wrser.PutCArrayTag(atag); const auto arrSize = atag.Count(); for (size_t i = 0; i < arrSize; ++i) { @@ -449,36 +550,38 @@ bool CJsonModifier::buildCJSON(Context &ctx) { if (tagMatched) { updateObject(ctx, 0); skipCjsonTag(ctx.rdser.GetCTag(), ctx.rdser, &ctx.fieldsArrayOffsets); - } else { - switch (atag.Type()) { - case TAG_OBJECT: { - TagsPathScope pathScope(ctx.currObjPath, tagName); - buildCJSON(ctx); - break; - } - case TAG_VARINT: - case TAG_DOUBLE: - case TAG_STRING: - case TAG_BOOL: - case TAG_ARRAY: - case TAG_NULL: - case TAG_END: - case TAG_UUID: - embedFieldValue(atag.Type(), field, ctx, i); - break; + continue; + } + + switch (atag.Type()) { + case TAG_OBJECT: { + TagsPathScope pathScopeObj(ctx.currObjPath, tagName); + buildCJSON(ctx); + break; } + case TAG_VARINT: + case TAG_DOUBLE: + case TAG_STRING: + case TAG_BOOL: + case TAG_ARRAY: + case TAG_NULL: + case TAG_END: + case TAG_UUID: + embedFieldValue(atag.Type(), field, ctx, i); + break; } } - if (field >= 0) { + + if (isIndexed(field)) { ctx.fieldsArrayOffsets[field] += arrSize; } - } else { - embedFieldValue(tagType, field, ctx, 0); - if (field >= 0) { - ctx.fieldsArrayOffsets[field] += 1; - } + return true; } + embedFieldValue(tagType, field, ctx, 0); + if (isIndexed(field)) { + ctx.fieldsArrayOffsets[field] += 1; + } return true; } diff --git a/cpp_src/core/cjson/cjsonmodifier.h b/cpp_src/core/cjson/cjsonmodifier.h index 5e3c12c7b..baa804d3e 100644 --- a/cpp_src/core/cjson/cjsonmodifier.h +++ b/cpp_src/core/cjson/cjsonmodifier.h @@ -11,22 +11,29 @@ class TagsMatcher; class CJsonModifier { public: CJsonModifier(TagsMatcher &tagsMatcher, PayloadType pt) noexcept : pt_(std::move(pt)), tagsMatcher_(tagsMatcher) {} - void SetFieldValue(std::string_view tuple, IndexedTagsPath path, const VariantArray &v, WrSerializer &ser, const Payload &pl); - void SetObject(std::string_view tuple, IndexedTagsPath path, const VariantArray &v, WrSerializer &ser, const Payload &pl); - void RemoveField(std::string_view tuple, IndexedTagsPath fieldPath, WrSerializer &wrser); + void SetFieldValue(std::string_view tuple, const IndexedTagsPath &fieldPath, const VariantArray &val, WrSerializer &ser, + const Payload &pl); + void SetObject(std::string_view tuple, const IndexedTagsPath &fieldPath, const VariantArray &val, WrSerializer &ser, const Payload &pl); + void RemoveField(std::string_view tuple, const IndexedTagsPath &fieldPath, WrSerializer &wrser); private: class Context; + Context initState(std::string_view tuple, const IndexedTagsPath &fieldPath, const VariantArray &val, WrSerializer &ser, + const Payload *pl, FieldModifyMode mode); bool updateFieldInTuple(Context &ctx); bool dropFieldInTuple(Context &ctx); bool buildCJSON(Context &ctx); - bool needToInsertField(const Context &ctx); - void insertField(Context &ctx); - void embedFieldValue(TagType, int field, Context &ctx, size_t idx); - void updateObject(Context &ctx, int tagName); - void updateField(Context &ctx, size_t idx); - TagType determineUpdateTagType(const Context &ctx, int field); - bool checkIfFoundTag(Context &ctx, bool isLastItem = false); + [[nodiscard]] bool needToInsertField(const 
Context &ctx) const;
+	void insertField(Context &ctx) const;
+	void embedFieldValue(TagType, int field, Context &ctx, size_t idx) const;
+	void updateObject(Context &ctx, int tagName) const;
+	void setArray(Context &ctx) const;
+	void writeCTag(const ctag &tag, Context &ctx);
+	void updateArray(TagType atagType, uint32_t count, int tagName, Context &ctx);
+	void copyArray(int tagName, Context &ctx);
+	[[nodiscard]] TagType determineUpdateTagType(const Context &ctx, int field) const;
+	[[nodiscard]] bool checkIfFoundTag(Context &ctx, bool isLastItem = false) const;
+	[[nodiscard]] bool isIndexed(int field) const noexcept { return (field >= 0); }

	PayloadType pt_;
	IndexedTagsPath fieldPath_, tagsPath_;
diff --git a/cpp_src/core/cjson/cjsontools.cc b/cpp_src/core/cjson/cjsontools.cc
index 9de00e55b..d282d146e 100644
--- a/cpp_src/core/cjson/cjsontools.cc
+++ b/cpp_src/core/cjson/cjsontools.cc
@@ -14,6 +14,23 @@ TagType kvType2Tag(KeyValueType kvType) noexcept {
						   [](OneOf) noexcept -> TagType { std::abort(); });
}

+TagType arrayKvType2Tag(const VariantArray &values) noexcept {
+	if (values.empty()) {
+		return TAG_NULL;
+	}
+
+	auto it = values.begin();
+	const auto type = kvType2Tag(it->Type());
+
+	++it;
+	for (auto end = values.end(); it != end; ++it) {
+		if (type != kvType2Tag(it->Type())) {
+			return TAG_OBJECT;	// heterogeneous array detected
+		}
+	}
+	return type;
+}
+
void copyCJsonValue(TagType tagType, const Variant &value, WrSerializer &wrser) {
	if (value.Type().Is()) return;
	switch (tagType) {
@@ -39,9 +56,11 @@ void copyCJsonValue(TagType tagType, const Variant &value, WrSerializer &wrser)
			break;
		case TAG_NULL:
			break;
+		case TAG_OBJECT:
+			wrser.PutVariant(value);
+			break;
		case TAG_ARRAY:
		case TAG_END:
-		case TAG_OBJECT:
			throw Error(errParseJson, "Unexpected cjson typeTag '%s' while parsing value", TagTypeToStr(tagType));
	}
}
@@ -57,13 +76,23 @@ void putCJsonRef(TagType tagType, int tagName, int tagField, const VariantArray
void putCJsonValue(TagType tagType, int tagName, const VariantArray &values, WrSerializer &wrser) {
	if (values.IsArrayValue()) {
-		const TagType elemType = kvType2Tag(values.ArrayType());
+		const TagType elemType = arrayKvType2Tag(values);
		wrser.PutCTag(ctag{TAG_ARRAY, tagName});
		wrser.PutCArrayTag(carraytag{values.size(), elemType});
-		for (const Variant &value : values) copyCJsonValue(elemType, value, wrser);
+		if (elemType == TAG_OBJECT) {
+			for (const Variant &value : values) {
+				auto itemType = kvType2Tag(value.Type());
+				wrser.PutCTag(ctag{itemType});
+				copyCJsonValue(itemType, value, wrser);
+			}
+		} else {
+			for (const Variant &value : values) copyCJsonValue(elemType, value, wrser);
+		}
	} else if (values.size() == 1) {
		wrser.PutCTag(ctag{tagType, tagName});
		copyCJsonValue(tagType, values.front(), wrser);
+	} else {
+		throw Error(errParams, "Unexpected value to update json value");
	}
}
@@ -86,10 +115,13 @@ void copyCJsonValue(TagType tagType, Serializer &rdser, WrSerializer &wrser) {
		case TAG_UUID:
			wrser.PutUuid(rdser.GetUuid());
			break;
-		case TAG_END:
		case TAG_OBJECT:
+			wrser.PutVariant(rdser.GetVariant());
+			break;
+		case TAG_END:
		case TAG_ARRAY:
-			throw Error(errParseJson, "Unexpected cjson typeTag '%s' while parsing value", TagTypeToStr(tagType));
+		default:
+			throw Error(errParseJson, "Unexpected cjson typeTag '%d' while parsing value", int(tagType));
	}
}
@@ -136,7 +168,9 @@ void skipCjsonTag(ctag tag, Serializer &rdser, std::array
		} else if (fieldsArrayOffsets) {
			(*fieldsArrayOffsets)[field] += 1;
		}
-	}
+	} break;
+	default:
+		throw Error(errParseJson, 
"skipCjsonTag: unexpected ctag type value: %d", int(tag.Type())); } } @@ -155,7 +189,7 @@ void buildPayloadTuple(const PayloadIface &pl, const TagsMatcher *tagsMatcher continue; } - int tagName = tagsMatcher->name2tag(fieldType.JsonPaths()[0]); + const int tagName = tagsMatcher->name2tag(fieldType.JsonPaths()[0]); assertf(tagName != 0, "ns=%s, field=%s", pl.Type().Name(), fieldType.JsonPaths()[0]); if (fieldType.IsArray()) { diff --git a/cpp_src/core/cjson/cjsontools.h b/cpp_src/core/cjson/cjsontools.h index f6ae3f448..e6ee5a03d 100644 --- a/cpp_src/core/cjson/cjsontools.h +++ b/cpp_src/core/cjson/cjsontools.h @@ -13,6 +13,7 @@ void putCJsonRef(TagType tagType, int tagName, int tagField, const VariantArray void putCJsonValue(TagType tagType, int tagName, const VariantArray &values, WrSerializer &wrser); [[nodiscard]] TagType kvType2Tag(KeyValueType kvType) noexcept; +[[nodiscard]] TagType arrayKvType2Tag(const VariantArray &values) noexcept; void skipCjsonTag(ctag tag, Serializer &rdser, std::array *fieldsArrayOffsets = nullptr); [[nodiscard]] Variant cjsonValueToVariant(TagType tag, Serializer &rdser, KeyValueType dstType); diff --git a/cpp_src/core/cjson/ctag.h b/cpp_src/core/cjson/ctag.h index b591ad616..19567dd13 100644 --- a/cpp_src/core/cjson/ctag.h +++ b/cpp_src/core/cjson/ctag.h @@ -33,8 +33,8 @@ class ctag { static constexpr uint32_t kNameMask = (uint32_t(1) << kNameBits) - uint32_t(1); static constexpr int kNameMax = (1 << kNameBits) - 1; - constexpr explicit ctag(TagType tagType) noexcept : ctag{tagType, 0} {} - constexpr ctag(TagType tagType, int tagName, int tagField = -1) noexcept + RX_ALWAYS_INLINE constexpr explicit ctag(TagType tagType) noexcept : ctag{tagType, 0} {} + RX_ALWAYS_INLINE constexpr ctag(TagType tagType, int tagName, int tagField = -1) noexcept : tag_{(uint32_t(tagType) & kTypeMask) | (uint32_t(tagName) << kTypeBits) | (uint32_t(tagField + 1) << kFieldOffset) | ((uint32_t(tagType) & kInvertedTypeMask) << kType1Offset)} { assertrx(tagName >= 0); @@ -43,22 +43,28 @@ class ctag { assertrx(tagField + 1 < (1 << kFieldBits)); } - [[nodiscard]] constexpr TagType Type() const noexcept { return typeImpl(tag_); } - [[nodiscard]] constexpr int Name() const noexcept { return nameImpl(tag_); } - [[nodiscard]] constexpr int Field() const noexcept { return fieldImpl(tag_); } + [[nodiscard]] RX_ALWAYS_INLINE constexpr TagType Type() const noexcept { return typeImpl(tag_); } + [[nodiscard]] RX_ALWAYS_INLINE constexpr int Name() const noexcept { return nameImpl(tag_); } + [[nodiscard]] RX_ALWAYS_INLINE constexpr int Field() const noexcept { return fieldImpl(tag_); } - [[nodiscard]] constexpr bool operator==(ctag other) const noexcept { return tag_ == other.tag_; } - [[nodiscard]] constexpr bool operator!=(ctag other) const noexcept { return !operator==(other); } + [[nodiscard]] RX_ALWAYS_INLINE constexpr bool operator==(ctag other) const noexcept { return tag_ == other.tag_; } + [[nodiscard]] RX_ALWAYS_INLINE constexpr bool operator!=(ctag other) const noexcept { return !operator==(other); } private: - explicit constexpr ctag(uint32_t tag) noexcept : ctag{typeImpl(tag), nameImpl(tag), fieldImpl(tag)} { assertrx_dbg(tag == tag_); } - explicit constexpr ctag(uint64_t tag) noexcept : ctag{typeImpl(tag), nameImpl(tag), fieldImpl(tag)} { assertrx_dbg(tag == tag_); } - [[nodiscard]] constexpr static TagType typeImpl(uint32_t tag) noexcept { + RX_ALWAYS_INLINE explicit constexpr ctag(uint32_t tag) noexcept : ctag{typeImpl(tag), nameImpl(tag), fieldImpl(tag)} { + assertrx_dbg(tag == 
tag_); + } + RX_ALWAYS_INLINE explicit constexpr ctag(uint64_t tag) noexcept : ctag{typeImpl(tag), nameImpl(tag), fieldImpl(tag)} { + assertrx_dbg(tag == tag_); + } + [[nodiscard]] RX_ALWAYS_INLINE constexpr static TagType typeImpl(uint32_t tag) noexcept { return static_cast((tag & kTypeMask) | ((tag >> kType1Offset) & kInvertedTypeMask)); } - [[nodiscard]] constexpr static int nameImpl(uint32_t tag) noexcept { return (tag >> kTypeBits) & kNameMask; } - [[nodiscard]] constexpr static int fieldImpl(uint32_t tag) noexcept { return int((tag >> kFieldOffset) & kFieldMask) - 1; } - [[nodiscard]] constexpr uint32_t asNumber() const noexcept { return tag_; } + [[nodiscard]] RX_ALWAYS_INLINE constexpr static int nameImpl(uint32_t tag) noexcept { return (tag >> kTypeBits) & kNameMask; } + [[nodiscard]] RX_ALWAYS_INLINE constexpr static int fieldImpl(uint32_t tag) noexcept { + return int((tag >> kFieldOffset) & kFieldMask) - 1; + } + [[nodiscard]] RX_ALWAYS_INLINE constexpr uint32_t asNumber() const noexcept { return tag_; } uint32_t tag_; }; @@ -79,22 +85,23 @@ class carraytag { static constexpr uint32_t kTypeMask = (uint32_t(1) << kTypeBits) - uint32_t(1); public: - constexpr carraytag(uint32_t count, TagType tag) noexcept : atag_(count | (uint32_t(tag) << kCountBits)) { + RX_ALWAYS_INLINE constexpr carraytag(uint32_t count, TagType tag) noexcept : atag_(count | (uint32_t(tag) << kCountBits)) { assertrx(count < (uint32_t(1) << kCountBits)); } - [[nodiscard]] constexpr TagType Type() const noexcept { return typeImpl(atag_); } - [[nodiscard]] constexpr uint32_t Count() const noexcept { return countImpl(atag_); } + [[nodiscard]] RX_ALWAYS_INLINE constexpr TagType Type() const noexcept { return typeImpl(atag_); } + [[nodiscard]] RX_ALWAYS_INLINE constexpr uint32_t Count() const noexcept { return countImpl(atag_); } - [[nodiscard]] constexpr bool operator==(carraytag other) const noexcept { return atag_ == other.atag_; } - [[nodiscard]] constexpr bool operator!=(carraytag other) const noexcept { return !operator==(other); } + [[nodiscard]] RX_ALWAYS_INLINE constexpr bool operator==(carraytag other) const noexcept { return atag_ == other.atag_; } + [[nodiscard]] RX_ALWAYS_INLINE constexpr bool operator!=(carraytag other) const noexcept { return !operator==(other); } private: - explicit constexpr carraytag(uint32_t atag) noexcept : carraytag{countImpl(atag), typeImpl(atag)} { assertrx_dbg(atag == atag_); } - [[nodiscard]] constexpr uint32_t asNumber() const noexcept { return atag_; } - [[nodiscard]] static constexpr TagType typeImpl(uint32_t atag) noexcept { - return static_cast((atag >> kCountBits) & kTypeMask); + RX_ALWAYS_INLINE explicit constexpr carraytag(uint32_t atag) noexcept : carraytag{countImpl(atag), typeImpl(atag)} { assertrx_dbg(atag == atag_); } + [[nodiscard]] RX_ALWAYS_INLINE constexpr uint32_t asNumber() const noexcept { return atag_; } + [[nodiscard]] RX_ALWAYS_INLINE static constexpr TagType typeImpl(uint32_t atag) noexcept { + assertrx_dbg(((atag >> kCountBits) & kTypeMask) <= kMaxTagType); + return static_cast((atag >> kCountBits) & kTypeMask); // NOLINT(*EnumCastOutOfRange) } - [[nodiscard]] static constexpr uint32_t countImpl(uint32_t atag) noexcept { return atag & kCountMask; } + [[nodiscard]] RX_ALWAYS_INLINE static constexpr uint32_t countImpl(uint32_t atag) noexcept { return atag & kCountMask; } uint32_t atag_; }; diff --git a/cpp_src/core/cjson/jsondecoder.cc b/cpp_src/core/cjson/jsondecoder.cc index d10e82539..5a5ebf3da 100644 --- a/cpp_src/core/cjson/jsondecoder.cc +++ 
b/cpp_src/core/cjson/jsondecoder.cc
@@ -74,6 +74,8 @@ void JsonDecoder::decodeJsonObject(Payload &pl, CJsonBuilder &builder, const gas
				builder.Ref(tagName, v, field);
				pl.Set(field, std::move(v), true);
			} break;
+			default:
+				throw Error(errLogic, "Unexpected '%d' tag", elem.value.getTag());
		}
	} else {
		// objectScalarIndexes_.set(field); - do not change objectScalarIndexes_ value for the filtered out fields
@@ -132,6 +134,8 @@ void JsonDecoder::decodeJson(Payload *pl, CJsonBuilder &builder, const gason::Js
			}
			break;
		}
+		default:
+			throw Error(errLogic, "Unexpected '%d' tag", jsonTag);
	}
}
diff --git a/cpp_src/core/cjson/msgpackbuilder.cc b/cpp_src/core/cjson/msgpackbuilder.cc
index 32338967e..f50ba39eb 100644
--- a/cpp_src/core/cjson/msgpackbuilder.cc
+++ b/cpp_src/core/cjson/msgpackbuilder.cc
@@ -152,7 +152,7 @@ void MsgPackBuilder::appendJsonObject(std::string_view name, const gason::JsonNo
			break;
		}
		default:
-			throw(Error(errLogic, "Unexpected json tag: %d", obj.value.getTag()));
+			throw(Error(errLogic, "Unexpected json tag: %d", int(obj.value.getTag())));
	}
}
diff --git a/cpp_src/core/cjson/readme.md b/cpp_src/core/cjson/readme.md
index 5e152eb85..6cd159583 100644
--- a/cpp_src/core/cjson/readme.md
+++ b/cpp_src/core/cjson/readme.md
@@ -1,46 +1,53 @@
-`CJSON` (Compact JSON) is internal reindexer binary format for transparing represent JSON data.
+`CJSON` (Compact JSON) is an internal reindexer binary format for transparently representing JSON data.

Each field of CJSON is encoded to `ctag` - varuint, which encodes type and name of field, and `data` binary representation of field data in format dependent of type.
+

## Ctag format

-| Bits | Field | Description |
-|------|-------------|-------------------------------------------------------------------------|
-| 3 | TypeTag | Type of field. One of TAG_XXX |
-| 12 | NameIndex | Index of field's name in names dictionary. 0 - empty name |
-| 6 | FieldIndex | Field index reference in internal reindexer payload. 0 - no reference. |
+| Bits | Field | Description |
+|------|------------|----------------------------------------------------------------------------------------------------------|
+| 3 | TypeTag0 | Type of field. One of TAG_XXX |
+| 12 | NameIndex | Index of field's name in names dictionary. 0 - empty name |
+| 10 | FieldIndex | Field index reference in internal reindexer payload. 0 - no reference. |
+| 4 | Reserved | Reserved for future use. |
+| 3 | TypeTag1 | Additional high-order bits for the field type. Together with TypeTag0 they define the actual data type. |
+

## Ctag type tag

-| Name | Value | Description |
-|------------|-------|---------------------------------|
-| TAG_VARINT | 0 | Data is number in varint format |
-| TAG_DOUBLE | 1 | Data is number in double format |
-| TAG_STRING | 2 | Data is string with varint length |
-| TAG_ARRAY | 3 | Data is array of elements |
-| TAG_BOOL | 4 | Data is bool |
-| TAG_NULL | 5 | Null |
-| TAG_OBJECT | 6 | Data is object |
-| TAG_END | 7 | End of object |
+| Name | Value | Description |
+|------------|-------|--------------------------------------------------|
+| TAG_VARINT | 0 | Data is number in varint format |
+| TAG_DOUBLE | 1 | Data is number in double format |
+| TAG_STRING | 2 | Data is string with varint length |
+| TAG_BOOL | 3 | Data is bool |
+| TAG_NULL | 4 | Null |
+| TAG_ARRAY | 5 | Data is array of elements |
+| TAG_OBJECT | 6 | Data is object |
+| TAG_END | 7 | End of object |
+| TAG_UUID | 8 | Data in UUID format. High bit stored in TypeTag1 |
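+
+For illustration, a ctag with the layout above can be unpacked like this (a minimal sketch of the bit layout only; the names here are illustrative and the actual implementation lives in `ctag.h`):
+
+```
+#include <cstdint>
+
+struct CTagFields {
+	uint32_t type;  // TypeTag0 | (TypeTag1 << 3)
+	uint32_t name;  // index in the names dictionary, 0 - empty name
+	int32_t field;  // payload field index, -1 - no reference
+};
+
+constexpr CTagFields unpackCTag(uint32_t tag) {
+	const uint32_t typeLow = tag & 0x7;             // bits 0..2   - TypeTag0
+	const uint32_t name = (tag >> 3) & 0xFFF;       // bits 3..14  - NameIndex
+	const uint32_t fieldRaw = (tag >> 15) & 0x3FF;  // bits 15..24 - FieldIndex, stored as field index + 1
+	const uint32_t typeHigh = (tag >> 29) & 0x7;    // bits 29..31 - TypeTag1
+	return CTagFields{typeLow | (typeHigh << 3), name, int32_t(fieldRaw) - 1};
+}
+
+// TAG_STRING (2) with name index 4 and no payload field reference:
+static_assert(unpackCTag(0x22).type == 2);
+static_assert(unpackCTag(0x22).name == 4);
+static_assert(unpackCTag(0x22).field == -1);
+```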

## Arrays

Arrays can be stored in 2 different ways:

-- homogeneous array, with all elements of same type
-- mixed array, with elements of various types
+- homogeneous array, with all elements of same type (Format: TAG_ARRAY + Atag(TAG_{item} + COUNT), each item is written in its item format)
+- heterogeneous array, with elements of various types (Format: TAG_ARRAY + Atag(TAG_OBJECT + COUNT), each item is written in its item format with Ctag(TAG_{item} + NameIndex=0 + FieldIndex=0))
+

### Atag - array tag format

-Atag is 4 byte int, which encodes type and count elements in array
+Atag is a 4-byte int, which encodes the type and the count of elements in the array (TTTTTTTTNNNNNNNNNNNNNNNNNNNNNNNN)
+
+| Bits | Field | Description |
+|------|---------|---------------------------------------------------------------------------------------------------------------|
+| 6 | TypeTag | Type of array's elements. If TAG_OBJECT, then the array is mixed, and each element contains an individual ctag |
+| 24 | Count | Count of elements in array |

-| Bits | Field | Description |
-|------|-------------|-------------------------------------------------------------------------|
-| 24 | Count | Count of elements in array |
-| 3 | TypeTag | Type of array's elements. If TAG_OBJECT, than array is mixed, and each element contains individual ctag|

## Record format
@@ -67,11 +74,11 @@ record :=

## CJSON pack format

-| # | Field | Description |
-|---|----------------------------|----------------------------------------------------------------------------|
+| # | Field | Description |
+|---|----------------------|----------------------------------------------------------------------------|
| 1 | Offset to names dict | Offset to names dictionary. 0 if there are no new names dictionary in pack |
-| 2 | Records | Tree of records. Begins with TAG_OBJECT, end TAG_END |
-| 3 | Names dictionary | Dictionary of field names |
+| 2 | Records | Tree of records. Begins with TAG_OBJECT, ends with TAG_END |
+| 3 | Names dictionary | Dictionary of field names |

```
names_dictionary :=
@@ -82,6 +89,7 @@ names_dictionary :=
  ]
```
+
## Example of CJSON

```json
{
@@ -104,3 +112,48 @@ names_dictionary :=
(TAG_END) 07
(TAG_END) 07
```
+
+
+### Array representations
+
+For example, a heterogeneous array with two elements, where the first element is the string "hi" and the second is the boolean value true, can be encoded as follows:
+```json
+{
+  "test": ["hi",true]
+}
+```
+```
+(TAG_ARRAY, field index) (TAG_OBJECT, array len) (TAG_STRING, string len, char array) (TAG_BOOL, value)
+```
+```
+\065\002\000\000\006\002\002hi\003\001\a
+```
+| Value | Description |
+|------------------|--------------------------------|
+| \065 | Ctag(TAG_ARRAY) |
+| \002\000\000\006 | Atag(2 items, TAG_OBJECT) |
+| \002 | Ctag(TAG_STRING) |
+| \002hi | Item string, 2 characters, "hi" |
+| \003 | Ctag(TAG_BOOL) |
+| \001 | Item boolean, 'true' |
+
+
+A homogeneous array, in contrast, stores its items without individual ctags:
+
+```json
+{
+  "test": ["hi","bro"]
+}
+```
+```
+(TAG_ARRAY, field index) (TAG_STRING, array len) (string len, char array) (string len, char array)
+```
+```
+\065\002\000\000\002\002hi\003bro\a
+```
+| Value | Description |
+|-------------------|---------------------------------|
+| \065 | Ctag(TAG_ARRAY) |
+| \002\000\000\002 | Atag(2 items, TAG_STRING) |
+| \002hi | Item string, 2 characters, "hi" |
+| \003bro | Item string, 3 characters, "bro" |
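+
+The atag words in these dumps can be verified with a similar sketch (illustrative only; assumes the 24-bit Count / 6-bit TypeTag split from the table above and little-endian byte order; see `carraytag` in `ctag.h` for the real implementation):
+
+```
+#include <cstdint>
+
+constexpr uint32_t atagCount(uint32_t atag) { return atag & 0xFFFFFF; }     // low 24 bits - Count
+constexpr uint32_t atagType(uint32_t atag) { return (atag >> 24) & 0x3F; }  // next 6 bits - TypeTag
+
+// bytes \002\000\000\006 of the heterogeneous example, read as a little-endian uint32
+static_assert(atagCount(0x06000002) == 2);  // 2 items
+static_assert(atagType(0x06000002) == 6);   // TAG_OBJECT
+// bytes \002\000\000\002 of the homogeneous example
+static_assert(atagCount(0x02000002) == 2);  // 2 items
+static_assert(atagType(0x02000002) == 2);   // TAG_STRING
+```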
diff --git a/cpp_src/core/ft/ft_fuzzy/baseseacher.cc b/cpp_src/core/ft/ft_fuzzy/baseseacher.cc
index 181f17faa..b1207bd9b 100644
--- a/cpp_src/core/ft/ft_fuzzy/baseseacher.cc
+++ b/cpp_src/core/ft/ft_fuzzy/baseseacher.cc
@@ -75,7 +75,6 @@ SearchResult BaseSearcher::Compare(const BaseHolder::Ptr &holder, const FtDSLQue
	std::pair pos;
	pos.first = 0;
-	SmartDeque result;
	std::vector rusults;
	int max_id = 0;
	int min_id = INT32_MAX;
@@ -126,8 +125,6 @@ void BaseSearcher::AddIndex(BaseHolder::Ptr &holder, std::string_view src_data,
	if (!src_data.length()) return;
	std::pair pos;
	pos.first = 0;
-	std::vector> res;
-	std::string word, str;
	std::wstring utf16str;
	std::vector wrds;
	split(src_data, utf16str, wrds, extraWordSymbols);
diff --git a/cpp_src/core/ft/ft_fuzzy/dataholder/smardeque.cc b/cpp_src/core/ft/ft_fuzzy/dataholder/smardeque.cc
index 663ce9915..d327a5e34 100644
--- a/cpp_src/core/ft/ft_fuzzy/dataholder/smardeque.cc
+++ b/cpp_src/core/ft/ft_fuzzy/dataholder/smardeque.cc
@@ -47,9 +47,9 @@ template void SmartDeque::allocDataPtr(size_t num) {
	auto tmp_data = new pointer[num];
	if (data_) {
-		memcpy(tmp_data, data_, sizeof(pointer) * size_);
+		memcpy(reinterpret_cast(tmp_data), reinterpret_cast(data_), sizeof(pointer) * size_);
	}
-	memset(tmp_data + size_, 0, (num - size_) * sizeof(pointer));
+	memset(reinterpret_cast(tmp_data + size_), 0, (num - size_) * sizeof(pointer));
	delete[] data_;
	data_ = tmp_data;
	size_ = num;
@@ -97,7 +97,8 @@ SmartDeque::SmartDeque(const SmartDeque& rhs) {
	}
	size_ = rhs.size_;
	data_ = new pointer[size_];
-	memcpy(data_, rhs.data_, sizeof(pointer) * size_);
+	memcpy(reinterpret_cast(data_), reinterpret_cast(rhs.data_), sizeof(pointer) * size_);
+	count_ = rhs.count_;
	for (size_t i = 0; i < size_; ++i) {
		if (data_[i] != nullptr) {
			data_[i] = new T[block_size];
@@ -197,9 +198,20 @@ typename SmartDeque::iterator& SmartDeque::iterato
}
template
-SmartDeque::iterator::iterator() : size_(0), offset_(0), parent_(nullptr), current_(nullptr) {}
+SmartDeque::iterator::iterator() : size_(0), offset_(0), parent_(nullptr), current_(nullptr) {
+	if constexpr(std::is_trivial::value) {
+		memset(&default_data, 0, sizeof(default_data));
+	} else {
+		static_assert(std::is_default_constructible::value, "Expecting default constructible type");
+	}
+}
template
SmartDeque::iterator::iterator(SmartDeque* parent) : size_(0), offset_(0), parent_(parent), current_(nullptr) {
+	if constexpr(std::is_trivial::value) {
+		memset(&default_data, 0, sizeof(default_data));
+	} else {
+		static_assert(std::is_default_constructible::value, "Expecting default constructible type");
+	}
	++(*this);
}
diff --git a/cpp_src/core/index/indextext/fastindextext.cc b/cpp_src/core/index/indextext/fastindextext.cc
index 4bd72ce34..8de162fb3 100644
--- a/cpp_src/core/index/indextext/fastindextext.cc
+++ b/cpp_src/core/index/indextext/fastindextext.cc
@@ -69,10 +69,8 @@ Variant FastIndexText::Upsert(const Variant &key, IdType id, bool &clearCache
template
void FastIndexText::Delete(const Variant &key, IdType id, StringsHolder &strHolder, bool &clearCache) {
-	int delcnt = 0;
	if rx_unlikely (key.Type().Is()) {
-		delcnt = this->empty_ids_.Unsorted().Erase(id);
-		assertrx(delcnt);
+		this->empty_ids_.Unsorted().Erase(id);	// ignore result
		this->isBuilt_ = false;
		return;
	}
@@ -82,7 +80,7 @@ void FastIndexText::Delete(const Variant &key, IdType id, StringsHolder &strH
	this->isBuilt_ = false;
	this->delMemStat(keyIt);
-	delcnt = keyIt->second.Unsorted().Erase(id);
+	int delcnt = keyIt->second.Unsorted().Erase(id);
	(void)delcnt;
	// TODO: we have to implement removal of composite indexes (doesn't work right now)
	assertf(this->opts_.IsArray() || this->Opts().IsSparse() || delcnt, "Delete unexists id from index '%s' id=%d,key=%s", this->name_, id,
diff --git a/cpp_src/core/index/indexunordered.cc b/cpp_src/core/index/indexunordered.cc
index 25382ca74..af7fb2f59 100644
--- a/cpp_src/core/index/indexunordered.cc
+++ b/cpp_src/core/index/indexunordered.cc
@@ -204,10 +204,8 @@ Variant IndexUnordered::Upsert(const Variant &key, IdType id, bool &clearCach
template
void IndexUnordered::Delete(const Variant &key, IdType id, StringsHolder &strHolder, bool &clearCache) {
-	int delcnt = 0;
	if (key.Type().Is()) {
-		delcnt = this->empty_ids_.Unsorted().Erase(id);
-		assertrx(delcnt);
+		this->empty_ids_.Unsorted().Erase(id);	// ignore result
		this->isBuilt_ = false;
		cache_.reset();
		clearCache = true;
@@ -218,7 +216,7 @@ void IndexUnordered::Delete(const Variant &key, IdType id, StringsHolder &str
	if (keyIt == idx_map.end()) return;
	delMemStat(keyIt);
-	delcnt = keyIt->second.Unsorted().Erase(id);
+	int delcnt = keyIt->second.Unsorted().Erase(id);
	(void)delcnt;
	this->isBuilt_ = false;
	cache_.reset();
diff --git a/cpp_src/core/indexdef.cc b/cpp_src/core/indexdef.cc
index d691df813..74eab7234 100644
--- a/cpp_src/core/indexdef.cc
+++ b/cpp_src/core/indexdef.cc
@@ -5,84 +5,82 @@
#include "tools/jsontools.h"
#include "tools/serializer.h"
#include "type_consts_helpers.h"
+#include "vendor/frozen/unordered_map.h"

namespace {

-static const std::vector &condsUsual() {
-	using namespace std::string_literals;
-	static const std::vector data{"SET"s, "EQ"s, "ANY"s, "EMPTY"s, "LT"s, "LE"s, "GT"s, "GE"s, "RANGE"s};
+using namespace std::string_view_literals;
+
+static const std::vector &condsUsual() {
+	static const std::vector data{"SET"sv, "EQ"sv, "ANY"sv, "EMPTY"sv, "LT"sv, "LE"sv, "GT"sv, "GE"sv, "RANGE"sv};
	return data;
}

-static const std::vector &condsText() {
-	using namespace std::string_literals;
-	static const std::vector data{"MATCH"s};
+static const std::vector &condsText() {
+	static const std::vector data{"MATCH"sv};
	return data;
}

-static 
const std::vector &condsBool() { - using namespace std::string_literals; - static const std::vector data{"SET"s, "EQ"s, "ANY"s, "EMPTY"s}; +static const std::vector &condsBool() { + static const std::vector data{"SET"sv, "EQ"sv, "ANY"sv, "EMPTY"sv}; return data; } -static const std::vector &condsGeom() { - using namespace std::string_literals; - static const std::vector data{"DWITHIN"s}; +static const std::vector &condsGeom() { + static const std::vector data{"DWITHIN"sv}; return data; } enum Caps { CapComposite = 0x1, CapSortable = 0x2, CapFullText = 0x4 }; struct IndexInfo { - const std::string fieldType, indexType; - const std::vector &conditions; + const std::string_view fieldType, indexType; + const std::vector &conditions; int caps; }; static const std::unordered_map, std::equal_to> &availableIndexes() { - using namespace std::string_literals; // clang-format off static const std::unordered_map, std::equal_to> data { - {IndexIntHash, {"int"s, "hash"s, condsUsual(), CapSortable}}, - {IndexInt64Hash, {"int64"s, "hash"s, condsUsual(), CapSortable}}, - {IndexStrHash, {"string"s, "hash", condsUsual(), CapSortable}}, - {IndexCompositeHash, {"composite"s, "hash"s, condsUsual(), CapSortable | CapComposite}}, - {IndexIntBTree, {"int"s, "tree"s, condsUsual(), CapSortable}}, - {IndexInt64BTree, {"int64"s, "tree"s, condsUsual(), CapSortable}}, - {IndexDoubleBTree, {"double"s, "tree"s, condsUsual(), CapSortable}}, - {IndexCompositeBTree, {"composite"s, "tree"s, condsUsual(), CapComposite | CapSortable}}, - {IndexStrBTree, {"string"s, "tree"s, condsUsual(), CapSortable}}, - {IndexIntStore, {"int"s, "-"s, condsUsual(), CapSortable}}, - {IndexBool, {"bool"s, "-"s, condsBool(), 0}}, - {IndexInt64Store, {"int64"s, "-"s, condsUsual(), CapSortable}}, - {IndexStrStore, {"string"s, "-"s, condsUsual(), CapSortable}}, - {IndexDoubleStore, {"double"s, "-"s, condsUsual(), CapSortable}}, - {IndexTtl, {"int64"s, "ttl"s, condsUsual(), CapSortable}}, - {IndexCompositeFastFT, {"composite"s, "text"s, condsText(), CapComposite | CapFullText}}, - {IndexCompositeFuzzyFT, {"composite"s, "fuzzytext"s, condsText(), CapComposite | CapFullText}}, - {IndexFastFT, {"string"s, "text"s, condsText(), CapFullText}}, - {IndexFuzzyFT, {"string"s, "fuzzytext"s, condsText(), CapFullText}}, - {IndexRTree, {"point"s, "rtree"s, condsGeom(), 0}}, - {IndexUuidHash, {"uuid"s, "hash"s, condsUsual(), CapSortable}}, - {IndexUuidStore, {"uuid"s, "-"s, condsUsual(), CapSortable}}, + {IndexIntHash, {"int"sv, "hash"sv, condsUsual(), CapSortable}}, + {IndexInt64Hash, {"int64"sv, "hash"sv, condsUsual(), CapSortable}}, + {IndexStrHash, {"string"sv, "hash"sv, condsUsual(), CapSortable}}, + {IndexCompositeHash, {"composite"sv, "hash"sv, condsUsual(), CapSortable | CapComposite}}, + {IndexIntBTree, {"int"sv, "tree"sv, condsUsual(), CapSortable}}, + {IndexInt64BTree, {"int64"sv, "tree"sv, condsUsual(), CapSortable}}, + {IndexDoubleBTree, {"double"sv, "tree"sv, condsUsual(), CapSortable}}, + {IndexCompositeBTree, {"composite"sv, "tree"sv, condsUsual(), CapComposite | CapSortable}}, + {IndexStrBTree, {"string"sv, "tree"sv, condsUsual(), CapSortable}}, + {IndexIntStore, {"int"sv, "-"sv, condsUsual(), CapSortable}}, + {IndexBool, {"bool"sv, "-"sv, condsBool(), 0}}, + {IndexInt64Store, {"int64"sv, "-"sv, condsUsual(), CapSortable}}, + {IndexStrStore, {"string"sv, "-"sv, condsUsual(), CapSortable}}, + {IndexDoubleStore, {"double"sv, "-"sv, condsUsual(), CapSortable}}, + {IndexTtl, {"int64"sv, "ttl"sv, condsUsual(), CapSortable}}, + {IndexCompositeFastFT, 
{"composite"sv, "text"sv, condsText(), CapComposite | CapFullText}}, + {IndexCompositeFuzzyFT, {"composite"sv, "fuzzytext"sv, condsText(), CapComposite | CapFullText}}, + {IndexFastFT, {"string"sv, "text"sv, condsText(), CapFullText}}, + {IndexFuzzyFT, {"string"sv, "fuzzytext"sv, condsText(), CapFullText}}, + {IndexRTree, {"point"sv, "rtree"sv, condsGeom(), 0}}, + {IndexUuidHash, {"uuid"sv, "hash"sv, condsUsual(), CapSortable}}, + {IndexUuidStore, {"uuid"sv, "-"sv, condsUsual(), CapSortable}}, }; // clang-format on return data; } -static const std::unordered_map, std::equal_to> &availableCollates() { - using namespace std::string_literals; - static const std::unordered_map, std::equal_to> data{ - {CollateASCII, "ascii"s}, {CollateUTF8, "utf8"s}, {CollateNumeric, "numeric"s}, {CollateCustom, "custom"s}, {CollateNone, "none"s}, - }; - return data; -} +constexpr static auto kAvailableCollates = frozen::make_unordered_map({ + {CollateASCII, "ascii"sv}, + {CollateUTF8, "utf8"sv}, + {CollateNumeric, "numeric"sv}, + {CollateCustom, "custom"sv}, + {CollateNone, "none"sv}, +}); -constexpr char const *kRTreeLinear = "linear"; -constexpr char const *kRTreeQuadratic = "quadratic"; -constexpr char const *kRTreeGreene = "greene"; -constexpr char const *kRTreeRStar = "rstar"; +constexpr auto kRTreeLinear = "linear"sv; +constexpr auto kRTreeQuadratic = "quadratic"sv; +constexpr auto kRTreeGreene = "greene"sv; +constexpr auto kRTreeRStar = "rstar"sv; } // namespace @@ -120,7 +118,6 @@ bool IndexDef::IsEqual(const IndexDef &other, IndexComparison cmpType) const { } IndexType IndexDef::Type() const { - using namespace std::string_view_literals; std::string_view iType = indexType_; if (iType == "") { if (fieldType_ == "double"sv) { @@ -146,7 +143,7 @@ void IndexDef::FromType(IndexType type) { indexType_ = it.indexType; } -const std::vector &IndexDef::Conditions() const { +const std::vector &IndexDef::Conditions() const noexcept { const auto it{availableIndexes().find(Type())}; assertrx(it != availableIndexes().cend()); return it->second.conditions; @@ -159,8 +156,6 @@ bool isStore(IndexType type) noexcept { type == IndexUuidStore; } -std::string IndexDef::getCollateMode() const { return availableCollates().at(opts_.GetCollateMode()); } - Error IndexDef::FromJSON(span json) { try { IndexDef::FromJSON(gason::JsonParser().Parse(json)); @@ -210,9 +205,9 @@ void IndexDef::FromJSON(const gason::JsonNode &root) { auto collateStr = root["collate_mode"].As(); if (!collateStr.empty()) { - auto collateIt = find_if(begin(availableCollates()), end(availableCollates()), - [&collateStr](const std::pair &p) { return collateStr == p.second; }); - if (collateIt == end(availableCollates())) throw Error(errParams, "Unknown collate mode %s", collateStr); + auto collateIt = find_if(begin(kAvailableCollates), end(kAvailableCollates), + [&collateStr](const std::pair &p) { return collateStr == p.second; }); + if (collateIt == end(kAvailableCollates)) throw Error(errParams, "Unknown collate mode %s", collateStr); CollateMode collateValue = collateIt->first; opts_.SetCollateMode(collateValue); if (collateValue == CollateCustom) { @@ -250,7 +245,7 @@ void IndexDef::GetJSON(WrSerializer &ser, int formatFlags) const { abort(); } } - builder.Put("collate_mode", getCollateMode()) + builder.Put("collate_mode", kAvailableCollates.at(opts_.GetCollateMode())) .Put("sort_order_letters", opts_.collateOpts_.sortOrderTable.GetSortOrderCharacters()) .Put("expire_after", expireAfter_) .Raw("config", opts_.HasConfig() ? 
opts_.config : "{}"); diff --git a/cpp_src/core/indexdef.h b/cpp_src/core/indexdef.h index 8958c4674..91e8eb3e0 100644 --- a/cpp_src/core/indexdef.h +++ b/cpp_src/core/indexdef.h @@ -28,8 +28,7 @@ struct IndexDef { IndexDef(std::string name, JsonPaths jsonPaths, IndexType type, IndexOpts opts); bool IsEqual(const IndexDef &other, IndexComparison cmpType) const; IndexType Type() const; - std::string getCollateMode() const; - const std::vector &Conditions() const; + const std::vector &Conditions() const noexcept; void FromType(IndexType type); Error FromJSON(span json); void FromJSON(const gason::JsonNode &jvalue); diff --git a/cpp_src/core/item.cc b/cpp_src/core/item.cc index 9dd1b3b18..3100b1ccd 100644 --- a/cpp_src/core/item.cc +++ b/cpp_src/core/item.cc @@ -124,11 +124,11 @@ Item::~Item() { } } -Error Item::FromJSON(std::string_view slice, char **endp, bool pkOnly) &noexcept { +Error Item::FromJSON(std::string_view slice, char **endp, bool pkOnly) & noexcept { RETURN_RESULT_NOEXCEPT(impl_->FromJSON(slice, endp, pkOnly)); } -Error Item::FromCJSON(std::string_view slice, bool pkOnly) &noexcept { +Error Item::FromCJSON(std::string_view slice, bool pkOnly) & noexcept { try { impl_->FromCJSON(slice, pkOnly); } @@ -140,13 +140,13 @@ void Item::FromCJSONImpl(std::string_view slice, bool pkOnly) & { impl_->FromCJS std::string_view Item::GetCJSON() { return impl_->GetCJSON(); } std::string_view Item::GetJSON() { return impl_->GetJSON(); } -Error Item::FromMsgPack(std::string_view buf, size_t &offset) &noexcept { RETURN_RESULT_NOEXCEPT(impl_->FromMsgPack(buf, offset)); } +Error Item::FromMsgPack(std::string_view buf, size_t &offset) & noexcept { RETURN_RESULT_NOEXCEPT(impl_->FromMsgPack(buf, offset)); } -Error Item::FromProtobuf(std::string_view sbuf) &noexcept { RETURN_RESULT_NOEXCEPT(impl_->FromProtobuf(sbuf)); } +Error Item::FromProtobuf(std::string_view sbuf) & noexcept { RETURN_RESULT_NOEXCEPT(impl_->FromProtobuf(sbuf)); } -Error Item::GetMsgPack(WrSerializer &wrser) &noexcept { RETURN_RESULT_NOEXCEPT(impl_->GetMsgPack(wrser)); } +Error Item::GetMsgPack(WrSerializer &wrser) & noexcept { RETURN_RESULT_NOEXCEPT(impl_->GetMsgPack(wrser)); } -Error Item::GetProtobuf(WrSerializer &wrser) &noexcept { RETURN_RESULT_NOEXCEPT(impl_->GetProtobuf(wrser)); } +Error Item::GetProtobuf(WrSerializer &wrser) & noexcept { RETURN_RESULT_NOEXCEPT(impl_->GetProtobuf(wrser)); } int Item::NumFields() const { return impl_->Type().NumFields(); } @@ -157,9 +157,9 @@ Item::FieldRef Item::operator[](int field) const { return FieldRef(field, impl_); } -Item::FieldRef Item::operator[](std::string_view name) const { +Item::FieldRef Item::FieldRefByName(std::string_view name, ItemImpl &impl) { int field = 0; - return (impl_->Type().FieldByName(name, field)) ? FieldRef(field, impl_) : FieldRef(name, impl_); + return (impl.Type().FieldByName(name, field)) ? 
FieldRef(field, &impl) : FieldRef(name, &impl); } int Item::GetFieldTag(std::string_view name) const { return impl_->NameTag(name); } @@ -168,7 +168,7 @@ void Item::SetPrecepts(const std::vector &precepts) & { impl_->SetP bool Item::IsTagsUpdated() const noexcept { return impl_->tagsMatcher().isUpdated(); } int Item::GetStateToken() const noexcept { return impl_->tagsMatcher().stateToken(); } -Item &Item::Unsafe(bool enable) &noexcept { +Item &Item::Unsafe(bool enable) & noexcept { impl_->Unsafe(enable); return *this; } diff --git a/cpp_src/core/item.h b/cpp_src/core/item.h index 28b04ab88..7892a9dc1 100644 --- a/cpp_src/core/item.h +++ b/cpp_src/core/item.h @@ -34,7 +34,7 @@ class Item { Item &operator=(Item &&) noexcept; /// Reference to field. Interface for field data manipulation - class FieldRef { + class [[nodiscard]] FieldRef { friend class Item; public: @@ -122,32 +122,32 @@ class Item { /// @param slice - data slice with Json. /// @param endp - pointer to end of parsed part of slice /// @param pkOnly - if TRUE, that mean a JSON string will be parse only primary key fields - Error FromJSON(std::string_view slice, char **endp = nullptr, bool pkOnly = false) &noexcept; + Error FromJSON(std::string_view slice, char **endp = nullptr, bool pkOnly = false) & noexcept; /// Build item from JSON
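All the Item parse/serialize methods in this hunk are lvalue-ref-qualified; the diff only normalizes the `&noexcept` spacing, the qualifier itself is pre-existing. What the qualifier buys, shown on a toy type (ItemSketch is hypothetical, not reindexer's Item):

```cpp
#include <string_view>

// The trailing '&' restricts the call to lvalues: parsing into a temporary whose
// internal buffers die at the end of the full expression cannot compile.
class ItemSketch {
public:
	bool FromJSON(std::string_view) & noexcept { return true; }  // lvalues only
};

int main() {
	ItemSketch item;
	item.FromJSON(R"({"id":1})");    // OK: 'item' is an lvalue
	// ItemSketch{}.FromJSON("{}");  // would not compile: rvalue object
}
```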
/// If Item is in *Unsafe Mode*, then Item will not store slice, but just keep pointer to data in slice, /// application *MUST* hold slice until end of life of Item /// @param slice - data slice with CJson /// @param pkOnly - if TRUE, that mean a JSON string will be parse only primary key fields - Error FromCJSON(std::string_view slice, bool pkOnly = false) &noexcept; + Error FromCJSON(std::string_view slice, bool pkOnly = false) & noexcept; void FromCJSONImpl(std::string_view slice, bool pkOnly = false) &; /// Builds item from msgpack::object. /// @param buf - msgpack encoded data buffer. /// @param offset - position to start from. - Error FromMsgPack(std::string_view buf, size_t &offset) &noexcept; + Error FromMsgPack(std::string_view buf, size_t &offset) & noexcept; /// Builds item from Protobuf /// @param sbuf - Protobuf encoded data - Error FromProtobuf(std::string_view sbuf) &noexcept; + Error FromProtobuf(std::string_view sbuf) & noexcept; /// Packs data in msgpack format /// @param wrser - buffer to serialize data to - Error GetMsgPack(WrSerializer &wrser) &noexcept; + Error GetMsgPack(WrSerializer &wrser) & noexcept; /// Packs item data to Protobuf /// @param wrser - buffer to serialize data to - Error GetProtobuf(WrSerializer &wrser) &noexcept; + Error GetProtobuf(WrSerializer &wrser) & noexcept; /// Serialize item to CJSON.
/// If Item is in *Unsafe Mode*, then returned slice is allocated in temporary buffer, and can be invalidated by any next operation @@ -174,11 +174,11 @@ class Item { /// Get field by number /// @param field - number of field. Must be >= 0 && < NumFields /// @return FieldRef which contains reference to indexed field - [[nodiscard]] FieldRef operator[](int field) const; + FieldRef operator[](int field) const; /// Get field by name /// @param name - name of field /// @return FieldRef which contains reference to indexed field - [[nodiscard]] FieldRef operator[](std::string_view name) const; + FieldRef operator[](std::string_view name) const { return FieldRefByName(name, *impl_); } /// Get field's name tag /// @param name - field name /// @return name's numeric tag value @@ -201,10 +201,15 @@ class Item { /// The advantage of unsafe mode is speed. It does not call extra memory allocation from heap and copying data.
/// The disadvantage of unsafe mode is potentially danger code. Most of C++ stl containters in many cases invalidates references - /// and in unsafe mode caller is responsibe to guarantee, that all resources passed to Item will keep valid - Item &Unsafe(bool enable = true) &noexcept; + Item &Unsafe(bool enable = true) & noexcept; /// Get index type by field id /// @return either index type or Undefined (if index with this number does not exist or PayloadType is not available) KeyValueType GetIndexType(int field) const noexcept; + /// Get field's ref by name + /// @param name - field name + /// @param itemImpl - item + /// @return field's ref + static FieldRef FieldRefByName(std::string_view name, ItemImpl &itemImpl); private: explicit Item(ItemImpl *impl) : impl_(impl) {} diff --git a/cpp_src/core/itemmodifier.cc b/cpp_src/core/itemmodifier.cc index cebd4621e..7dcba0079 100644 --- a/cpp_src/core/itemmodifier.cc +++ b/cpp_src/core/itemmodifier.cc @@ -505,7 +505,7 @@ void ItemModifier::modifyIndexValues(IdType itemId, const FieldData &field, Vari throw Error(errParams, "Cannot update array item with an empty value"); // TODO #1218 maybe delete this } int offset = -1, length = -1; - bool isForAllItems = false; + isForAllItems = false; for (const auto &tag : field.tagspath()) { // TODO: Move to FieldEntry? if (tag.IsForAllItems()) { isForAllItems = true; diff --git a/cpp_src/core/key_value_type.h b/cpp_src/core/key_value_type.h index b2a1b142c..6a76f5a95 100644 --- a/cpp_src/core/key_value_type.h +++ b/cpp_src/core/key_value_type.h @@ -84,9 +84,9 @@ class KeyValueType { Tuple, Uuid } value_{KVT::Undefined}; - constexpr explicit KeyValueType(KVT v) noexcept : value_{v} {} + RX_ALWAYS_INLINE constexpr explicit KeyValueType(KVT v) noexcept : value_{v} {} - [[nodiscard]] static KeyValueType fromNumber(int n) { + [[nodiscard]] RX_ALWAYS_INLINE static KeyValueType fromNumber(int n) { switch (n) { case static_cast(KVT::Int64): case static_cast(KVT::Double): @@ -103,24 +103,24 @@ class KeyValueType { throw Error(errParams, "Invalid int value for KeyValueType: " + std::to_string(n)); } } - [[nodiscard]] int toNumber() const noexcept { return static_cast(value_); } + [[nodiscard]] RX_ALWAYS_INLINE int toNumber() const noexcept { return static_cast(value_); } public: - constexpr KeyValueType(Int64) noexcept : value_{KVT::Int64} {} - constexpr KeyValueType(Double) noexcept : value_{KVT::Double} {} - constexpr KeyValueType(String) noexcept : value_{KVT::String} {} - constexpr KeyValueType(Bool) noexcept : value_{KVT::Bool} {} - constexpr KeyValueType(Null) noexcept : value_{KVT::Null} {} - constexpr KeyValueType(Int) noexcept : value_{KVT::Int} {} - constexpr KeyValueType(Undefined) noexcept : value_{KVT::Undefined} {} - constexpr KeyValueType(Composite) noexcept : value_{KVT::Composite} {} - constexpr KeyValueType(Tuple) noexcept : value_{KVT::Tuple} {} - constexpr KeyValueType(Uuid) noexcept : value_{KVT::Uuid} {} - constexpr KeyValueType(const KeyValueType&) noexcept = default; - constexpr KeyValueType& operator=(const KeyValueType&) noexcept = default; - constexpr KeyValueType(KeyValueType&&) noexcept = default; - constexpr KeyValueType& operator=(KeyValueType&&) noexcept = default; - explicit KeyValueType(TagType t) { + RX_ALWAYS_INLINE constexpr KeyValueType(Int64) noexcept : value_{KVT::Int64} {} + RX_ALWAYS_INLINE constexpr KeyValueType(Double) noexcept : value_{KVT::Double} {} + RX_ALWAYS_INLINE constexpr KeyValueType(String) noexcept : value_{KVT::String} {} + RX_ALWAYS_INLINE constexpr 
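The itemmodifier.cc change above, dropping the `bool` from `isForAllItems = false;`, reads as a shadowing fix: the declaration introduced a fresh local instead of resetting the enclosing flag, so the outer value was never updated. The bug class in miniature:

```cpp
#include <cassert>

int main() {
	// Before the fix: re-declaring the flag inside the nested scope shadows it,
	// and the enclosing variable stays untouched.
	bool outer = false;
	{
		bool outer = true;  // shadows; assignments here never reach the real flag
		(void)outer;
	}
	assert(!outer);

	// After the fix: a plain assignment updates the enclosing flag.
	bool fixed = false;
	{
		fixed = true;
	}
	assert(fixed);
}
```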
KeyValueType(Bool) noexcept : value_{KVT::Bool} {} + RX_ALWAYS_INLINE constexpr KeyValueType(Null) noexcept : value_{KVT::Null} {} + RX_ALWAYS_INLINE constexpr KeyValueType(Int) noexcept : value_{KVT::Int} {} + RX_ALWAYS_INLINE constexpr KeyValueType(Undefined) noexcept : value_{KVT::Undefined} {} + RX_ALWAYS_INLINE constexpr KeyValueType(Composite) noexcept : value_{KVT::Composite} {} + RX_ALWAYS_INLINE constexpr KeyValueType(Tuple) noexcept : value_{KVT::Tuple} {} + RX_ALWAYS_INLINE constexpr KeyValueType(Uuid) noexcept : value_{KVT::Uuid} {} + RX_ALWAYS_INLINE constexpr KeyValueType(const KeyValueType&) noexcept = default; + RX_ALWAYS_INLINE constexpr KeyValueType& operator=(const KeyValueType&) noexcept = default; + RX_ALWAYS_INLINE constexpr KeyValueType(KeyValueType&&) noexcept = default; + RX_ALWAYS_INLINE constexpr KeyValueType& operator=(KeyValueType&&) noexcept = default; + RX_ALWAYS_INLINE explicit KeyValueType(TagType t) { switch (t) { case TAG_VARINT: value_ = KVT::Int64; diff --git a/cpp_src/core/namespace/namespaceimpl.cc b/cpp_src/core/namespace/namespaceimpl.cc index b4086660a..0e60b57a8 100644 --- a/cpp_src/core/namespace/namespaceimpl.cc +++ b/cpp_src/core/namespace/namespaceimpl.cc @@ -37,7 +37,7 @@ using std::chrono::microseconds; #define kStorageMetaPrefix "meta" #define kTupleName "-tuple" -static const std::string kPKIndexName = "#pk"; +constexpr static std::string_view kPKIndexName = "#pk"; constexpr int kWALStatementItemsThreshold = 5; #define kStorageMagic 0x1234FEDC @@ -1004,12 +1004,12 @@ NamespaceImpl::RollBack_insertIndex NamespaceImpl::insertIndex(std::unique_ptr std::string { diff --git a/cpp_src/core/nsselecter/selectiterator.h b/cpp_src/core/nsselecter/selectiterator.h index 7384cf389..91a74dac5 100644 --- a/cpp_src/core/nsselecter/selectiterator.h +++ b/cpp_src/core/nsselecter/selectiterator.h @@ -23,7 +23,6 @@ class SelectIterator : public SelectKeyResult { UnbuiltSortOrdersIndex, }; - SelectIterator() = default; SelectIterator(SelectKeyResult &&res, bool dist, std::string &&n, IteratorFieldKind fKind, bool forcedFirst = false) noexcept : SelectKeyResult(std::move(res)), distinct(dist), @@ -233,7 +232,7 @@ class SelectIterator : public SelectKeyResult { bool distinct = false; std::string name; - IteratorFieldKind fieldKind; + IteratorFieldKind fieldKind = IteratorFieldKind::None; protected: // Iterates to a next item of result diff --git a/cpp_src/core/nsselecter/sortexpression.cc b/cpp_src/core/nsselecter/sortexpression.cc index 47d9f4652..eb0b68fed 100644 --- a/cpp_src/core/nsselecter/sortexpression.cc +++ b/cpp_src/core/nsselecter/sortexpression.cc @@ -2,7 +2,7 @@ #include #include "core/namespace/namespaceimpl.h" #include "core/queryresults/joinresults.h" -#include "estl/fast_hash_set.h" +#include "estl/charset.h" #include "estl/restricted.h" #include "joinedselector.h" #include "joinedselectormock.h" @@ -11,7 +11,7 @@ namespace { -static void throwParseError(const std::string_view sortExpr, char const* const pos, const std::string_view message) { +static RX_NO_INLINE void throwParseError(const std::string_view sortExpr, char const* const pos, const std::string_view message) { throw reindexer::Error(errParams, "'%s' is not valid sort expression. Parser failed at position %d.%s%s", sortExpr, pos - sortExpr.data(), message.empty() ? 
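The key_value_type.h hunk leans on RX_ALWAYS_INLINE throughout. Its definition is not part of this diff; a conventional force-inline macro of this kind looks roughly like the sketch below (an assumption about the macro, not reindexer's actual header):

```cpp
// Assumed shape of a force-inline macro; the real RX_ALWAYS_INLINE lives
// elsewhere in the tree and may differ.
#if defined(_MSC_VER)
#define RX_ALWAYS_INLINE __forceinline
#else
#define RX_ALWAYS_INLINE inline __attribute__((always_inline))
#endif

// Usage mirrors the constructors above: tiny, hot, header-only bodies where a
// real call would cost more than the body itself.
RX_ALWAYS_INLINE constexpr int toNumberSketch(int v) noexcept { return v; }
```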
"" : " ", message); } @@ -192,19 +192,19 @@ struct ParseIndexNameResult { std::string_view name; }; +constexpr static estl::Charset kIndexNameSyms{'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', + 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', + 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', + 'Z', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '_', '.', '+', '"'}; + template static ParseIndexNameResult parseIndexName(std::string_view& expr, const std::vector& joinedSelectors, const std::string_view fullExpr) { - static const fast_hash_set allowedSymbolsInIndexName{ - 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', - 'w', 'x', 'y', 'z', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', - 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '_', '.', '+', '"'}; - auto pos = expr.data(); const auto end = expr.data() + expr.size(); auto joinedSelectorIt = joinedSelectors.cend(); bool joinedFieldInQuotes = false; - while (pos != end && *pos != '.' && allowedSymbolsInIndexName.find(*pos) != allowedSymbolsInIndexName.end()) ++pos; + while (pos != end && *pos != '.' && kIndexNameSyms.test(*pos)) ++pos; if (pos != end && *pos == '.') { std::string_view namespaceName = {expr.data(), static_cast(pos - expr.data())}; @@ -228,7 +228,7 @@ static ParseIndexNameResult parseIndexName(std::string_view& expr, const std: joinedFieldInQuotes = false; } } - while (pos != end && allowedSymbolsInIndexName.find(*pos) != allowedSymbolsInIndexName.end()) ++pos; + while (pos != end && kIndexNameSyms.test(*pos)) ++pos; std::string_view name{expr.data(), static_cast(pos - expr.data())}; if (name.empty()) { throwParseError(fullExpr, pos, "Expected index or function name."); diff --git a/cpp_src/core/query/dsl/dslencoder.cc b/cpp_src/core/query/dsl/dslencoder.cc index 51680d249..65662a9d4 100644 --- a/cpp_src/core/query/dsl/dslencoder.cc +++ b/cpp_src/core/query/dsl/dslencoder.cc @@ -1,44 +1,47 @@ #include "dslencoder.h" -#include #include "core/cjson/jsonbuilder.h" #include "core/keyvalue/p_string.h" #include "core/query/query.h" #include "core/queryresults/aggregationresult.h" #include "dslparser.h" #include "tools/logger.h" - -struct EnumClassHash { - template - size_t operator()(T t) const { - return static_cast(t); - } -}; +#include "vendor/frozen/unordered_map.h" namespace reindexer { namespace dsl { -static const std::unordered_map join_types = { - {InnerJoin, "inner"}, {LeftJoin, "left"}, {OrInnerJoin, "orinner"}}; - -static const std::unordered_map cond_map = { - {CondAny, "any"}, {CondEq, "eq"}, {CondLt, "lt"}, {CondLe, "le"}, {CondGt, "gt"}, {CondGe, "ge"}, - {CondRange, "range"}, {CondSet, "set"}, {CondAllSet, "allset"}, {CondEmpty, "empty"}, {CondLike, "like"}, {CondDWithin, "dwithin"}, -}; - -static const std::unordered_map op_map = {{OpOr, "or"}, {OpAnd, "and"}, {OpNot, "not"}}; - -static const std::unordered_map reqtotal_values = { - {ModeNoTotal, "disabled"}, {ModeAccurateTotal, "enabled"}, {ModeCachedTotal, "cached"}}; +constexpr static auto kJoinTypes = + frozen::make_unordered_map({{InnerJoin, "inner"}, {LeftJoin, "left"}, {OrInnerJoin, "orinner"}}); + +constexpr static auto kCondMap = frozen::make_unordered_map({ + {CondAny, "any"}, + {CondEq, "eq"}, + {CondLt, "lt"}, + {CondLe, "le"}, + {CondGt, "gt"}, + {CondGe, "ge"}, + {CondRange, "range"}, + 
{CondSet, "set"}, + {CondAllSet, "allset"}, + {CondEmpty, "empty"}, + {CondLike, "like"}, + {CondDWithin, "dwithin"}, +}); + +constexpr static auto kOpMap = frozen::make_unordered_map({{OpOr, "or"}, {OpAnd, "and"}, {OpNot, "not"}}); + +constexpr static auto kReqTotalValues = frozen::make_unordered_map( + {{ModeNoTotal, "disabled"}, {ModeAccurateTotal, "enabled"}, {ModeCachedTotal, "cached"}}); enum class QueryScope { Main, Subquery }; -template -std::string get(std::unordered_map const& m, const T& key) { +template +std::string_view get(frozen::unordered_map const& m, const T& key) { auto it = m.find(key); if (it != m.end()) return it->second; assertrx(it != m.end()); - return std::string(); + return std::string_view(); } static void encodeSorting(const SortingEntries& sortingEntries, JsonBuilder& builder) { @@ -132,15 +135,15 @@ static void encodeAggregationFunctions(const Query& query, JsonBuilder& builder) static void encodeJoinEntry(const QueryJoinEntry& joinEntry, JsonBuilder& builder) { builder.Put("left_field", joinEntry.LeftFieldName()); builder.Put("right_field", joinEntry.RightFieldName()); - builder.Put("cond", get(cond_map, joinEntry.Condition())); - builder.Put("op", get(op_map, joinEntry.Operation())); + builder.Put("cond", get(kCondMap, joinEntry.Condition())); + builder.Put("op", get(kOpMap, joinEntry.Operation())); } void encodeSingleJoinQuery(const JoinedQuery& joinQuery, JsonBuilder& builder) { using namespace std::string_view_literals; auto node = builder.Object("join_query"sv); - node.Put("type", get(join_types, joinQuery.joinType)); + node.Put("type", get(kJoinTypes, joinQuery.joinType)); node.Put("namespace", joinQuery.NsName()); node.Put("limit", joinQuery.Limit()); node.Put("offset", joinQuery.Offset()); @@ -177,7 +180,7 @@ static void putValues(JsonBuilder& builder, const VariantArray& values) { static void encodeFilter(const QueryEntry& qentry, JsonBuilder& builder) { if (qentry.Distinct()) return; - builder.Put("cond", get(cond_map, CondType(qentry.Condition()))); + builder.Put("cond", get(kCondMap, CondType(qentry.Condition()))); builder.Put("field", qentry.FieldName()); putValues(builder, qentry.Values()); } @@ -224,7 +227,7 @@ void toDsl(const Query& query, QueryScope scope, JsonBuilder& builder) { builder.Put("namespace", query.NsName()); builder.Put("limit", query.Limit()); builder.Put("offset", query.Offset()); - builder.Put("req_total", get(reqtotal_values, query.CalcTotal())); + builder.Put("req_total", get(kReqTotalValues, query.CalcTotal())); if (scope != QueryScope::Subquery) { builder.Put("explain", query.NeedExplain()); builder.Put("type", "select"); @@ -300,7 +303,7 @@ std::string toDsl(const Query& query) { void QueryEntries::toDsl(const_iterator it, const_iterator to, const Query& parentQuery, JsonBuilder& builder) { for (; it != to; ++it) { auto node = builder.Object(); - node.Put("op", dsl::get(dsl::op_map, it->operation)); + node.Put("op", dsl::get(dsl::kOpMap, it->operation)); it->Visit( [&node](const AlwaysFalse&) { logPrintf(LogTrace, "Not normalized query to dsl"); @@ -311,7 +314,7 @@ void QueryEntries::toDsl(const_iterator it, const_iterator to, const Query& pare node.Put("always", true); }, [&node, &parentQuery](const SubQueryEntry& sqe) { - node.Put("cond", dsl::get(dsl::cond_map, CondType(sqe.Condition()))); + node.Put("cond", dsl::get(dsl::kCondMap, CondType(sqe.Condition()))); { auto subquery = node.Object("subquery"); dsl::toDsl(parentQuery.GetSubQuery(sqe.QueryIndex()), dsl::QueryScope::Subquery, subquery); @@ -319,7 +322,7 @@ void 
QueryEntries::toDsl(const_iterator it, const_iterator to, const Query& pare dsl::putValues(node, sqe.Values()); }, [&node, &parentQuery](const SubQueryFieldEntry& sqe) { - node.Put("cond", dsl::get(dsl::cond_map, CondType(sqe.Condition()))); + node.Put("cond", dsl::get(dsl::kCondMap, CondType(sqe.Condition()))); node.Put("field", sqe.FieldName()); auto subquery = node.Object("subquery"); dsl::toDsl(parentQuery.GetSubQuery(sqe.QueryIndex()), dsl::QueryScope::Subquery, subquery); @@ -338,7 +341,7 @@ void QueryEntries::toDsl(const_iterator it, const_iterator to, const Query& pare dsl::encodeSingleJoinQuery(parentQuery.GetJoinQueries()[jqe.joinIndex], node); }, [&node](const BetweenFieldsQueryEntry& qe) { - node.Put("cond", dsl::get(dsl::cond_map, CondType(qe.Condition()))); + node.Put("cond", dsl::get(dsl::kCondMap, CondType(qe.Condition()))); node.Put("first_field", qe.LeftFieldName()); node.Put("second_field", qe.RightFieldName()); }); diff --git a/cpp_src/core/query/dsl/dslparser.cc b/cpp_src/core/query/dsl/dslparser.cc index fd28e58fd..6fd9d40b0 100644 --- a/cpp_src/core/query/dsl/dslparser.cc +++ b/cpp_src/core/query/dsl/dslparser.cc @@ -1,12 +1,13 @@ #include "dslparser.h" #include "core/cjson/jschemachecker.h" #include "core/query/query.h" -#include "estl/fast_hash_map.h" #include "gason/gason.h" #include "tools/errors.h" +#include "tools/frozen_str_tools.h" #include "tools/json2kv.h" #include "tools/jsontools.h" #include "tools/stringstools.h" +#include "vendor/frozen/unordered_map.h" namespace reindexer { using namespace gason; @@ -43,11 +44,13 @@ enum class EqualPosition { Positions }; enum class UpdateField { Name, Type, Values, IsArray }; enum class UpdateFieldType { Object, Expression, Value }; -// additional for parse root DSL fields -template -using fast_str_map = fast_hash_map; +template +constexpr auto MakeFastStrMap(std::pair const (&items)[N]) { + return frozen::make_unordered_map(items, frozen::nocase_hash_str{}, frozen::nocase_equal_str{}); +} -static const fast_str_map root_map = { +// additional for parse root DSL fields +constexpr static auto kRootMap = MakeFastStrMap({ {"namespace", Root::Namespace}, {"limit", Root::Limit}, {"offset", Root::Offset}, @@ -65,90 +68,105 @@ static const fast_str_map root_map = { {"type", Root::QueryType}, {"drop_fields", Root::DropFields}, {"update_fields", Root::UpdateFields}, -}; +}); // additional for parse field 'sort' - -static const fast_str_map sort_map = {{"desc", Sort::Desc}, {"field", Sort::Field}, {"values", Sort::Values}}; +constexpr static auto kSortMap = MakeFastStrMap({{"desc", Sort::Desc}, {"field", Sort::Field}, {"values", Sort::Values}}); // additional for parse field 'joined' +constexpr static auto joins_map = MakeFastStrMap({{"type", JoinRoot::Type}, + {"namespace", JoinRoot::Namespace}, + {"filters", JoinRoot::Filters}, + {"sort", JoinRoot::Sort}, + {"limit", JoinRoot::Limit}, + {"offset", JoinRoot::Offset}, + {"on", JoinRoot::On}, + {"select_filter", JoinRoot::SelectFilter}}); -static const fast_str_map joins_map = {{"type", JoinRoot::Type}, {"namespace", JoinRoot::Namespace}, - {"filters", JoinRoot::Filters}, {"sort", JoinRoot::Sort}, - {"limit", JoinRoot::Limit}, {"offset", JoinRoot::Offset}, - {"on", JoinRoot::On}, {"select_filter", JoinRoot::SelectFilter}}; +constexpr static auto joined_entry_map = MakeFastStrMap( + {{"left_field", JoinEntry::LeftField}, {"right_field", JoinEntry::RightField}, {"cond", JoinEntry::Cond}, {"op", JoinEntry::Op}}); -static const fast_str_map joined_entry_map = { - {"left_field", 
JoinEntry::LeftField}, {"right_field", JoinEntry::RightField}, {"cond", JoinEntry::Cond}, {"op", JoinEntry::Op}}; - -static const fast_str_map join_types = {{"inner", InnerJoin}, {"left", LeftJoin}, {"orinner", OrInnerJoin}}; +constexpr static auto join_types = MakeFastStrMap({{"inner", InnerJoin}, {"left", LeftJoin}, {"orinner", OrInnerJoin}}); // additionalfor parse field 'filters' - -static const fast_str_map filter_map = {{"cond", Filter::Cond}, - {"op", Filter::Op}, - {"field", Filter::Field}, - {"value", Filter::Value}, - {"filters", Filter::Filters}, - {"join_query", Filter::JoinQuery}, - {"first_field", Filter::FirstField}, - {"second_field", Filter::SecondField}, - {"equal_positions", Filter::EqualPositions}, - {"subquery", Filter::SubQuery}, - {"always", Filter::Always}}; +constexpr static auto filter_map = MakeFastStrMap({{"cond", Filter::Cond}, + {"op", Filter::Op}, + {"field", Filter::Field}, + {"value", Filter::Value}, + {"filters", Filter::Filters}, + {"join_query", Filter::JoinQuery}, + {"first_field", Filter::FirstField}, + {"second_field", Filter::SecondField}, + {"equal_positions", Filter::EqualPositions}, + {"subquery", Filter::SubQuery}, + {"always", Filter::Always}}); // additional for 'filter::cond' field - -static const fast_str_map cond_map = { - {"any", CondAny}, {"eq", CondEq}, {"lt", CondLt}, {"le", CondLe}, {"gt", CondGt}, - {"ge", CondGe}, {"range", CondRange}, {"set", CondSet}, {"allset", CondAllSet}, {"empty", CondEmpty}, - {"match", CondEq}, {"like", CondLike}, {"dwithin", CondDWithin}, -}; - -static const fast_str_map op_map = {{"or", OpOr}, {"and", OpAnd}, {"not", OpNot}}; +constexpr static auto cond_map = MakeFastStrMap({ + {"any", CondAny}, + {"eq", CondEq}, + {"lt", CondLt}, + {"le", CondLe}, + {"gt", CondGt}, + {"ge", CondGe}, + {"range", CondRange}, + {"set", CondSet}, + {"allset", CondAllSet}, + {"empty", CondEmpty}, + {"match", CondEq}, + {"like", CondLike}, + {"dwithin", CondDWithin}, +}); + +constexpr static auto kOpMap = MakeFastStrMap({{"or", OpOr}, {"and", OpAnd}, {"not", OpNot}}); // additional for 'Root::ReqTotal' field -static const fast_str_map reqtotal_values = { - {"disabled", ModeNoTotal}, {"enabled", ModeAccurateTotal}, {"cached", ModeCachedTotal}}; +constexpr static auto kReqTotalValues = + MakeFastStrMap({{"disabled", ModeNoTotal}, {"enabled", ModeAccurateTotal}, {"cached", ModeCachedTotal}}); // additional for 'Root::Aggregations' field - -static const fast_str_map aggregation_map = {{"fields", Aggregation::Fields}, - {"type", Aggregation::Type}, - {"sort", Aggregation::Sort}, - {"limit", Aggregation::Limit}, - {"offset", Aggregation::Offset}}; -static const fast_str_map aggregation_types = { - {"sum", AggSum}, {"avg", AggAvg}, {"max", AggMax}, {"min", AggMin}, - {"facet", AggFacet}, {"distinct", AggDistinct}, {"count", AggCount}, {"count_cached", AggCountCached}, -}; +constexpr static auto kAggregationMap = MakeFastStrMap({{"fields", Aggregation::Fields}, + {"type", Aggregation::Type}, + {"sort", Aggregation::Sort}, + {"limit", Aggregation::Limit}, + {"offset", Aggregation::Offset}}); +constexpr static auto kAggregationTypes = MakeFastStrMap({ + {"sum", AggSum}, + {"avg", AggAvg}, + {"max", AggMax}, + {"min", AggMin}, + {"facet", AggFacet}, + {"distinct", AggDistinct}, + {"count", AggCount}, + {"count_cached", AggCountCached}, +}); // additionalfor parse field 'equation_positions' -static const fast_str_map equationPosition_map = {{"positions", EqualPosition::Positions}}; +constexpr static auto kEquationPositionMap = 
MakeFastStrMap({{"positions", EqualPosition::Positions}}); // additional for 'Root::QueryType' field -static const fast_str_map query_types = { +constexpr static auto kQueryTypes = MakeFastStrMap({ {"select", QuerySelect}, {"update", QueryUpdate}, {"delete", QueryDelete}, {"truncate", QueryTruncate}, -}; +}); // additional for 'Root::UpdateField' field -static const fast_str_map update_field_map = { +constexpr static auto kUpdateFieldMap = MakeFastStrMap({ {"name", UpdateField::Name}, {"type", UpdateField::Type}, {"values", UpdateField::Values}, {"is_array", UpdateField::IsArray}, -}; +}); // additional for 'Root::UpdateFieldType' field -static const fast_str_map update_field_type_map = { +constexpr static auto kUpdateFieldTypeMap = MakeFastStrMap({ {"object", UpdateFieldType::Object}, {"expression", UpdateFieldType::Expression}, {"value", UpdateFieldType::Value}, -}; +}); static bool checkTag(const JsonValue& val, JsonTag tag) noexcept { return val.getTag() == tag; } @@ -162,8 +180,9 @@ void checkJsonValueType(const JsonValue& val, std::string_view name, JsonTags... if (!checkTag(val, possibleTags...)) throw Error(errParseJson, "Wrong type of field '%s'", name); } -template -T get(fast_str_map const& m, std::string_view name, std::string_view mapName) { +template +T get(frozen::unordered_map const& m, std::string_view name, + std::string_view mapName) { auto it = m.find(name); if (it == m.end()) { throw Error(errParseDSL, "Element [%s] not allowed in object of type [%s]", name, mapName); @@ -183,26 +202,22 @@ void parseStringArray(const JsonValue& stringArray, Arr& array) { template void parseValues(const JsonValue& values, Array& kvs) { if (values.getTag() == JSON_ARRAY) { + uint32_t objectsCount = 0; for (const auto& elem : values) { Variant kv; if (elem.value.getTag() == JSON_OBJECT) { kv = Variant(stringifyJson(elem)); + ++objectsCount; } else if (elem.value.getTag() != JSON_NULL) { kv = jsonValue2Variant(elem.value, KeyValueType::Undefined{}); kv.EnsureHold(); } - if (!kvs.empty() && !kvs.back().Type().IsSame(kv.Type())) { - if (kvs.size() != 1 || !((kvs[0].Type().template Is() && - (kv.Type().Is() || kv.Type().Is() || - kv.Type().Is())) || - (kv.Type().Is() && (kvs[0].Type().template Is() || - kvs[0].Type().template Is() || - kvs[0].Type().template Is())))) { - throw Error(errParseJson, "Array of filter values must be homogeneous."); - } - } kvs.emplace_back(std::move(kv)); } + + if ((0 < objectsCount) && (objectsCount < kvs.size())) { + throw Error(errParseJson, "Array with objects must be homogeneous"); + } } else if (values.getTag() != JSON_NULL) { Variant kv(jsonValue2Variant(values, KeyValueType::Undefined{})); kv.EnsureHold(); @@ -218,7 +233,7 @@ static void parseSortEntry(const JsonValue& entry, SortingEntries& sortingEntrie for (const auto& subelement : entry) { auto& v = subelement.value; std::string_view name = subelement.key; - switch (get(sort_map, name, "sort"sv)) { + switch (get(kSortMap, name, "sort"sv)) { case Sort::Desc: if ((v.getTag() != JSON_TRUE) && (v.getTag() != JSON_FALSE)) throw Error(errParseJson, "Wrong type of field '%s'", name); sortingEntry.desc = (v.getTag() == JSON_TRUE); @@ -277,7 +292,7 @@ static void parseFilter(const JsonValue& filter, Query& q, std::vector(op_map, v.toString(), "operation enum"sv); + op = get(kOpMap, v.toString(), "operation enum"sv); break; case Filter::Value: @@ -407,7 +422,7 @@ static void parseJoinedEntries(const JsonValue& joinEntries, JoinedQuery& qjoin) break; case JoinEntry::Op: checkJsonValueType(value, name, 
JSON_STRING); - op = get(op_map, value.toString(), "operation enum"sv); + op = get(kOpMap, value.toString(), "operation enum"sv); break; } } @@ -491,7 +506,7 @@ static void parseAggregation(const JsonValue& aggregation, Query& query) { for (const auto& element : aggregation) { auto& value = element.value; std::string_view name = element.key; - switch (get(aggregation_map, name, "aggregations"sv)) { + switch (get(kAggregationMap, name, "aggregations"sv)) { case Aggregation::Fields: checkJsonValueType(value, name, JSON_ARRAY); for (const auto& subElem : value) { @@ -501,7 +516,7 @@ static void parseAggregation(const JsonValue& aggregation, Query& query) { break; case Aggregation::Type: checkJsonValueType(value, name, JSON_STRING); - type = get(aggregation_types, value.toString(), "aggregation type enum"sv); + type = get(kAggregationTypes, value.toString(), "aggregation type enum"sv); if (!query.CanAddAggregation(type)) { throw Error(errConflict, kAggregationWithSelectFieldsMsgError); } @@ -534,7 +549,7 @@ void parseEqualPositions(const JsonValue& dsl, std::vector(equationPosition_map, name, "equal_positions"sv)) { + switch (get(kEquationPositionMap, name, "equal_positions"sv)) { case EqualPosition::Positions: { EqualPosition_t ep; for (const auto& f : value) { @@ -562,14 +577,14 @@ static void parseUpdateFields(const JsonValue& updateFields, Query& query) { for (const auto& v : field) { auto& value = v.value; std::string_view name = v.key; - switch (get(update_field_map, name, "update_fields"sv)) { + switch (get(kUpdateFieldMap, name, "update_fields"sv)) { case UpdateField::Name: checkJsonValueType(value, name, JSON_STRING); fieldName.assign(value.sval.data(), value.sval.size()); break; case UpdateField::Type: { checkJsonValueType(value, name, JSON_STRING); - switch (get(update_field_type_map, value.toString(), "update_fields_type"sv)) { + switch (get(kUpdateFieldTypeMap, value.toString(), "update_fields_type"sv)) { case UpdateFieldType::Object: isObject = true; break; @@ -593,7 +608,7 @@ static void parseUpdateFields(const JsonValue& updateFields, Query& query) { } } if (isExpression && (values.size() != 1 || !values.front().Type().template Is())) - throw Error(errParseDSL, "The array \"values\" must contain only a string type value for the type \"expression\""); + throw Error(errParseDSL, R"(The array "values" must contain only a string type value for the type "expression")"); if (isObject) { query.SetObject(fieldName, std::move(values)); @@ -612,7 +627,7 @@ void parse(const JsonValue& root, Query& q) { for (const auto& elem : root) { auto& v = elem.value; auto name = elem.key; - switch (get(root_map, name, "root"sv)) { + switch (get(kRootMap, name, "root"sv)) { case Root::Namespace: checkJsonValueType(v, name, JSON_STRING); q.SetNsName(v.toString()); @@ -656,7 +671,7 @@ void parse(const JsonValue& root, Query& q) { break; case Root::ReqTotal: checkJsonValueType(v, name, JSON_STRING); - q.CalcTotal(get(reqtotal_values, v.toString(), "req_total enum"sv)); + q.CalcTotal(get(kReqTotalValues, v.toString(), "req_total enum"sv)); break; case Root::Aggregations: checkJsonValueType(v, name, JSON_ARRAY); @@ -681,7 +696,7 @@ void parse(const JsonValue& root, Query& q) { throw Error(errParseDSL, "Unsupported old DSL format. 
Equal positions should be in filters."); case Root::QueryType: checkJsonValueType(v, name, JSON_STRING); - q.type_ = get(query_types, v.toString(), "query_type"sv); + q.type_ = get(kQueryTypes, v.toString(), "query_type"sv); break; case Root::DropFields: checkJsonValueType(v, name, JSON_ARRAY); diff --git a/cpp_src/core/query/expressionevaluator.cc b/cpp_src/core/query/expressionevaluator.cc index 7d585328b..a4b8d3a77 100644 --- a/cpp_src/core/query/expressionevaluator.cc +++ b/cpp_src/core/query/expressionevaluator.cc @@ -237,7 +237,7 @@ void ExpressionEvaluator::handleCommand(tokenizer& parser, const PayloadValue& v if (cmd == Command::ArrayRemoveOnce) { // remove elements from array once auto it = std::find_if(values.begin(), values.end(), [&item](const auto& elem) { - return item.RelaxCompare(elem) == ComparationResult::Eq; + return item.RelaxCompare(elem) == ComparationResult::Eq; }); if (it != values.end()) { values.erase(it); @@ -246,7 +246,7 @@ void ExpressionEvaluator::handleCommand(tokenizer& parser, const PayloadValue& v // remove elements from array values.erase(std::remove_if(values.begin(), values.end(), [&item](const auto& elem) { - return item.RelaxCompare(elem) == ComparationResult::Eq; + return item.RelaxCompare(elem) == ComparationResult::Eq; }), values.end()); } diff --git a/cpp_src/core/query/query.h b/cpp_src/core/query/query.h index 86cbac666..c44c4f1af 100644 --- a/cpp_src/core/query/query.h +++ b/cpp_src/core/query/query.h @@ -157,7 +157,7 @@ class Query { /// @param l - list of values to be compared according to the order /// of indexes in composite index name. /// There can be maximum 2 VariantArray objects in l: in case of CondRange condition, - /// in all other cases amount of elements in l would be striclty equal to 1. + /// in all other cases amount of elements in l would be strictly equal to 1. /// For example, composite index name is "bookid+price", so l[0][0] (and l[1][0] /// in case of CondRange) belongs to "bookid" and l[0][1] (and l[1][1] in case of CondRange) /// belongs to "price" indexes. @@ -222,7 +222,7 @@ class Query { } else { q.checkSubQueryWithData(); if (!q.selectFilter_.empty() && !q.HasLimit() && !q.HasOffset()) { - // Transforms main query condition into subquerie's condition + // Converts main query condition to subquery condition q.sortingEntries_.clear(); q.Where(q.selectFilter_[0], cond, std::move(values)); q.selectFilter_.clear(); @@ -288,7 +288,7 @@ class Query { /// Sets a new value for a field. /// @param field - field name. /// @param value - new value. - /// @param hasExpressions - true: value has expresions in it + /// @param hasExpressions - true: value has expressions in it template > * = nullptr> Query &Set(Str &&field, ValueType value, bool hasExpressions = false) & { return Set(std::forward(field), {value}, hasExpressions); @@ -300,7 +300,7 @@ class Query { /// Sets a new value for a field. /// @param field - field name. /// @param l - new value. - /// @param hasExpressions - true: value has expresions in it + /// @param hasExpressions - true: value has expressions in it template > * = nullptr> Query &Set(Str &&field, std::initializer_list l, bool hasExpressions = false) & { VariantArray value; @@ -315,7 +315,7 @@ class Query { /// Sets a new value for a field. /// @param field - field name. /// @param l - new value. 
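The expressionevaluator.cc hunk above distinguishes array_remove_once (find_if plus a single erase) from array_remove (the erase-remove idiom over all matches). The same two flavours, with int and a plain comparison standing in for Variant and RelaxCompare:

```cpp
#include <algorithm>
#include <cassert>
#include <vector>

int main() {
	// array_remove_once: drop only the first match.
	std::vector<int> once{1, 2, 2, 3};
	if (auto it = std::find_if(once.begin(), once.end(), [](int v) { return v == 2; });
	    it != once.end()) {
		once.erase(it);
	}
	assert((once == std::vector<int>{1, 2, 3}));

	// array_remove: erase-remove drops every match in one pass.
	std::vector<int> all{1, 2, 2, 3};
	all.erase(std::remove_if(all.begin(), all.end(), [](int v) { return v == 2; }), all.end());
	assert((all == std::vector<int>{1, 3}));
}
```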
- /// @param hasExpressions - true: value has expresions in it + /// @param hasExpressions - true: value has expressions in it template > * = nullptr> Query &Set(Str &&field, const std::vector &l, bool hasExpressions = false) & { VariantArray value; @@ -330,7 +330,7 @@ class Query { /// Sets a new value for a field. /// @param field - field name. /// @param value - new value. - /// @param hasExpressions - true: value has expresions in it + /// @param hasExpressions - true: value has expressions in it template > * = nullptr> Query &Set(Str &&field, VariantArray value, bool hasExpressions = false) & { updateFields_.emplace_back(std::forward(field), std::move(value), FieldModeSet, hasExpressions); @@ -343,7 +343,7 @@ class Query { /// Sets a value for a field as an object. /// @param field - field name. /// @param value - new value. - /// @param hasExpressions - true: value has expresions in it + /// @param hasExpressions - true: value has expressions in it template > * = nullptr> Query &SetObject(Str &&field, ValueType value, bool hasExpressions = false) & { return SetObject(std::forward(field), {value}, hasExpressions); @@ -355,7 +355,7 @@ class Query { /// Sets a new value for a field as an object. /// @param field - field name. /// @param l - new value. - /// @param hasExpressions - true: value has expresions in it + /// @param hasExpressions - true: value has expressions in it template > * = nullptr> Query &SetObject(Str &&field, std::initializer_list l, bool hasExpressions = false) & { VariantArray value; @@ -370,7 +370,7 @@ class Query { /// Sets a new value for a field as an object. /// @param field - field name. /// @param l - new value. - /// @param hasExpressions - true: value has expresions in it + /// @param hasExpressions - true: value has expressions in it template > * = nullptr> Query &SetObject(Str &&field, const std::vector &l, bool hasExpressions = false) & { VariantArray value; @@ -385,7 +385,7 @@ class Query { /// Sets a value for a field as an object. /// @param field - field name. /// @param value - new value. - /// @param hasExpressions - true: value has expresions in it + /// @param hasExpressions - true: value has expressions in it template > * = nullptr> Query &SetObject(Str &&field, VariantArray value, bool hasExpressions = false) & { for (auto &it : value) { @@ -636,7 +636,7 @@ class Query { } /// Performs distinct for a certain index. - /// @param indexName - name of index for distict operation. + /// @param indexName - name of index for distinct operation. template > * = nullptr> Query &Distinct(Str &&indexName) & { if (!strEmpty(indexName)) { @@ -980,7 +980,7 @@ class JoinedQuery : public Query { [[nodiscard]] bool operator==(const JoinedQuery &obj) const; JoinType joinType{JoinType::LeftJoin}; /// Default join type. - h_vector joinEntries_; /// Condition for join. Filled in each subqueries, empty in root query + h_vector joinEntries_; /// Condition for join. 
Filled in each subqueries, empty in root query }; template diff --git a/cpp_src/core/query/sql/sqlencoder.cc b/cpp_src/core/query/sql/sqlencoder.cc index f6ea3a54e..9a4b6298a 100644 --- a/cpp_src/core/query/sql/sqlencoder.cc +++ b/cpp_src/core/query/sql/sqlencoder.cc @@ -258,7 +258,7 @@ WrSerializer &SQLEncoder::GetSQL(WrSerializer &ser, bool stripArgs) const { return ser; } -static const char *opNames[] = {"-", "OR", "AND", "AND NOT"}; +constexpr static std::string_view kOpNames[] = {"-", "OR", "AND", "AND NOT"}; template static void dumpCondWithValues(WrSerializer &ser, std::string_view fieldName, CondType cond, const VariantArray &values, bool stripArgs) { @@ -336,14 +336,14 @@ void SQLEncoder::dumpWhereEntries(QueryEntries::const_iterator from, QueryEntrie }, [&](const SubQueryEntry &sqe) { if (encodedEntries) { - ser << opNames[op] << ' '; + ser << kOpNames[op] << ' '; } dumpCondWithValues(ser, '(' + query_.GetSubQuery(sqe.QueryIndex()).GetSQL(stripArgs) + ')', sqe.Condition(), sqe.Values(), stripArgs); }, [&](const SubQueryFieldEntry &sqe) { if (encodedEntries) { - ser << opNames[op] << ' '; + ser << kOpNames[op] << ' '; } ser << sqe.FieldName() << ' ' << sqe.Condition() << " ("; SQLEncoder{query_.GetSubQuery(sqe.QueryIndex())}.GetSQL(ser, stripArgs); @@ -351,7 +351,7 @@ void SQLEncoder::dumpWhereEntries(QueryEntries::const_iterator from, QueryEntrie }, [&](const QueryEntriesBracket &bracket) { if (encodedEntries) { - ser << opNames[op] << ' '; + ser << kOpNames[op] << ' '; } ser << '('; dumpWhereEntries(it.cbegin(), it.cend(), ser, stripArgs); @@ -360,19 +360,19 @@ void SQLEncoder::dumpWhereEntries(QueryEntries::const_iterator from, QueryEntrie }, [&](const QueryEntry &entry) { if (encodedEntries) { - ser << opNames[op] << ' '; + ser << kOpNames[op] << ' '; } dumpCondWithValues(ser, entry.FieldName(), entry.Condition(), entry.Values(), stripArgs); }, [&](const JoinQueryEntry &jqe) { if (encodedEntries && query_.GetJoinQueries()[jqe.joinIndex].joinType != JoinType::OrInnerJoin) { - ser << opNames[op] << ' '; + ser << kOpNames[op] << ' '; } SQLEncoder(query_).DumpSingleJoinQuery(jqe.joinIndex, ser, stripArgs); }, [&](const BetweenFieldsQueryEntry &entry) { if (encodedEntries) { - ser << opNames[op] << ' '; + ser << kOpNames[op] << ' '; } indexToSql(entry.LeftFieldName(), ser); ser << ' ' << entry.Condition() << ' '; diff --git a/cpp_src/core/query/sql/sqlparser.cc b/cpp_src/core/query/sql/sqlparser.cc index af434e4e0..ae4d7ba1c 100644 --- a/cpp_src/core/query/sql/sqlparser.cc +++ b/cpp_src/core/query/sql/sqlparser.cc @@ -432,7 +432,6 @@ int SQLParser::deleteParse(tokenizer &parser) { } static void addUpdateValue(const token &currTok, tokenizer &parser, UpdateEntry &updateField) { - updateField.SetMode(FieldModeSet); if (currTok.type == TokenString) { updateField.Values().push_back(token2kv(currTok, parser, false)); } else { @@ -504,6 +503,16 @@ void SQLParser::parseArray(tokenizer &parser, std::string_view tokText, UpdateEn throw Error(errParseSQL, "Expected ']' or ',', but found '%s' in query, %s", tok.text(), parser.where()); } } + + if (updateField && (updateField->Mode() == FieldModeSetJson)) { + for (const auto &it : updateField->Values()) { + if ((!it.Type().Is()) || + std::string_view(it).front() != '{') { + throw Error(errLogic, "Unexpected variant type in Array: %s. 
Expecting KeyValueType::String with JSON-content", + it.Type().Name()); + } + } + } } void SQLParser::parseCommand(tokenizer &parser) const { @@ -571,6 +580,7 @@ UpdateEntry SQLParser::parseUpdateField(tokenizer &parser) { if (tok.text() == "["sv) { updateField.Values().MarkArray(); parseArray(parser, tok.text(), &updateField); + updateField.SetIsExpression(false); } else if (tok.text() == "array_remove"sv || tok.text() == "array_remove_once"sv) { parseCommand(parser); diff --git a/cpp_src/core/queryresults/queryresults.cc b/cpp_src/core/queryresults/queryresults.cc index 8d853f016..df94408b5 100644 --- a/cpp_src/core/queryresults/queryresults.cc +++ b/cpp_src/core/queryresults/queryresults.cc @@ -115,7 +115,7 @@ std::string QueryResults::Dump() const { if (&items_[i] != &*items_.begin()) buf += ","; buf += std::to_string(items_[i].Id()); if (joined_.empty()) continue; - Iterator itemIt{this, int(i), errOK}; + Iterator itemIt{this, int(i), errOK, {}}; auto joinIt = itemIt.GetJoined(); if (joinIt.getJoinedItemsCount() > 0) { buf += "["; @@ -149,8 +149,39 @@ int QueryResults::GetJoinedNsCtxIndex(int nsid) const noexcept { class QueryResults::EncoderDatasourceWithJoins final : public IEncoderDatasourceWithJoins { public: - EncoderDatasourceWithJoins(const joins::ItemIterator &joinedItemIt, const ContextsVector &ctxs, int ctxIdx) noexcept - : joinedItemIt_(joinedItemIt), ctxs_(ctxs), ctxId_(ctxIdx) {} + EncoderDatasourceWithJoins(const joins::ItemIterator &joinedItemIt, const ContextsVector &ctxs, Iterator::NsNamesCache &nsNamesCache, + int ctxIdx, size_t nsid, size_t joinedCount) noexcept + : joinedItemIt_(joinedItemIt), ctxs_(ctxs), nsNamesCache_(nsNamesCache), ctxId_(ctxIdx), nsid_{nsid} { + if (nsNamesCache.size() <= nsid_) { + nsNamesCache.resize(nsid_ + 1); + } + if (nsNamesCache[nsid_].size() < joinedCount) { + nsNamesCache[nsid_].clear(); + nsNamesCache[nsid_].reserve(joinedCount); + fast_hash_map namesCounters; + assertrx_dbg(ctxs_.size() >= ctxId_ + joinedCount); + for (size_t i = ctxId_, end = ctxId_ + joinedCount; i < end; ++i) { + const std::string &n = ctxs_[i].type_.Name(); + if (auto [it, emplaced] = namesCounters.emplace(n, -1); !emplaced) { + --it->second; + } + } + for (size_t i = ctxId_, end = ctxId_ + joinedCount; i < end; ++i) { + const std::string &n = ctxs_[i].type_.Name(); + int &count = namesCounters[n]; + if (count < 0) { + if (count == -1) { + nsNamesCache[nsid_].emplace_back(n); + } else { + count = 1; + nsNamesCache[nsid_].emplace_back("1_" + n); + } + } else { + nsNamesCache[nsid_].emplace_back(std::to_string(++count) + '_' + n); + } + } + } + } ~EncoderDatasourceWithJoins() override = default; size_t GetJoinedRowsCount() const noexcept override final { return joinedItemIt_.getJoinedFieldsCount(); } @@ -172,15 +203,14 @@ class QueryResults::EncoderDatasourceWithJoins final : public IEncoderDatasource const Context &ctx = ctxs_[ctxId_ + rowid]; return ctx.fieldsFilter_; } - const std::string &GetJoinedItemNamespace(size_t rowid) noexcept override final { - const Context &ctx = ctxs_[ctxId_ + rowid]; - return ctx.type_->Name(); - } + const std::string &GetJoinedItemNamespace(size_t rowid) const noexcept override final { return nsNamesCache_[nsid_][rowid]; } private: const joins::ItemIterator &joinedItemIt_; const ContextsVector &ctxs_; + const Iterator::NsNamesCache &nsNamesCache_; const int ctxId_; + const size_t nsid_; }; class AdditionalDatasource : public IAdditionalDatasource { @@ -208,7 +238,7 @@ class AdditionalDatasourceCSV : public IAdditionalDatasource { 
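The EncoderDatasourceWithJoins constructor above builds a per-nsid cache of unique joined-namespace names: names that occur once pass through, while every occurrence of a duplicated name gets a numeric prefix. The de-duplication rule is easiest to see in isolation; a sketch with std containers standing in for fast_hash_map/h_vector (dedupNames is a hypothetical helper):

```cpp
#include <string>
#include <unordered_map>
#include <vector>

std::vector<std::string> dedupNames(const std::vector<std::string>& names) {
	std::unordered_map<std::string, int> counters;
	for (const auto& n : names) {
		auto [it, emplaced] = counters.emplace(n, -1);
		if (!emplaced) --it->second;  // -1 means unique, -2 and below means duplicated
	}
	std::vector<std::string> out;
	out.reserve(names.size());
	for (const auto& n : names) {
		int& count = counters[n];
		if (count < 0) {
			if (count == -1) {
				out.push_back(n);         // unique: keep as-is
			} else {
				count = 1;
				out.push_back("1_" + n);  // first occurrence of a duplicated name
			}
		} else {
			out.push_back(std::to_string(++count) + '_' + n);  // "2_", "3_", ...
		}
	}
	return out;
}
// dedupNames({"a", "b", "a"}) -> {"1_a", "b", "2_a"}
```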
IEncoderDatasourceWithJoins *joinsDs_; }; -void QueryResults::encodeJSON(int idx, WrSerializer &ser) const { +void QueryResults::encodeJSON(int idx, WrSerializer &ser, Iterator::NsNamesCache &nsNamesCache) const { auto &itemRef = items_[idx]; assertrx(ctxs.size() > itemRef.Nsid()); auto &ctx = ctxs[itemRef.Nsid()]; @@ -225,7 +255,8 @@ void QueryResults::encodeJSON(int idx, WrSerializer &ser) const { if (!joined_.empty()) { joins::ItemIterator itemIt = (begin() + idx).GetJoined(); if (itemIt.getJoinedItemsCount() > 0) { - EncoderDatasourceWithJoins joinsDs(itemIt, ctxs, GetJoinedNsCtxIndex(itemRef.Nsid())); + EncoderDatasourceWithJoins joinsDs(itemIt, ctxs, nsNamesCache, GetJoinedNsCtxIndex(itemRef.Nsid()), itemRef.Nsid(), + joined_[itemRef.Nsid()].GetJoinedSelectorsCount()); if (needOutputRank) { AdditionalDatasource ds(itemRef.Proc(), &joinsDs); encoder.Encode(pl, builder, &ds); @@ -295,9 +326,9 @@ Error QueryResults::Iterator::GetJSON(WrSerializer &ser, bool withHdrLen) { try { if (withHdrLen) { auto slicePosSaver = ser.StartSlice(); - qr_->encodeJSON(idx_, ser); + qr_->encodeJSON(idx_, ser, nsNamesCache); } else { - qr_->encodeJSON(idx_, ser); + qr_->encodeJSON(idx_, ser, nsNamesCache); } } catch (const Error &err) { err_ = err; @@ -321,9 +352,10 @@ CsvOrdering QueryResults::MakeCSVTagOrdering(unsigned limit, unsigned offset) co fast_hash_set fieldsTmIds; WrSerializer ser; const auto &tm = getTagsMatcher(0); + Iterator::NsNamesCache nsNamesCache; for (size_t i = offset; i < items_.size() && i < offset + limit; ++i) { ser.Reset(); - encodeJSON(i, ser); + encodeJSON(i, ser, nsNamesCache); gason::JsonParser parser; auto jsonNode = parser.Parse(giftStr(ser.Slice())); @@ -355,7 +387,8 @@ Error QueryResults::Iterator::GetCSV(WrSerializer &ser, CsvOrdering &ordering) n if (!qr_->joined_.empty()) { joins::ItemIterator itemIt = (qr_->begin() + idx_).GetJoined(); if (itemIt.getJoinedItemsCount() > 0) { - EncoderDatasourceWithJoins joinsDs(itemIt, qr_->ctxs, qr_->GetJoinedNsCtxIndex(itemRef.Nsid())); + EncoderDatasourceWithJoins joinsDs(itemIt, qr_->ctxs, nsNamesCache, qr_->GetJoinedNsCtxIndex(itemRef.Nsid()), + itemRef.Nsid(), qr_->joined_[itemRef.Nsid()].GetJoinedSelectorsCount()); AdditionalDatasourceCSV ds(&joinsDs); encoder.Encode(pl, builder, &ds); return errOK; diff --git a/cpp_src/core/queryresults/queryresults.h b/cpp_src/core/queryresults/queryresults.h index b21934a74..dc3f510d5 100644 --- a/cpp_src/core/queryresults/queryresults.h +++ b/cpp_src/core/queryresults/queryresults.h @@ -101,11 +101,13 @@ class QueryResults { const QueryResults *qr_; int idx_; Error err_; + using NsNamesCache = h_vector, 1>; + NsNamesCache nsNamesCache; }; - Iterator begin() const noexcept { return Iterator{this, 0, errOK}; } - Iterator end() const noexcept { return Iterator{this, int(items_.size()), errOK}; } - Iterator operator[](int idx) const noexcept { return Iterator{this, idx, errOK}; } + Iterator begin() const noexcept { return Iterator{this, 0, errOK, {}}; } + Iterator end() const noexcept { return Iterator{this, int(items_.size()), errOK, {}}; } + Iterator operator[](int idx) const noexcept { return Iterator{this, idx, errOK, {}}; } std::vector joined_; std::vector aggregationResults; @@ -154,12 +156,12 @@ class QueryResults { std::string explainResults; -protected: +private: class EncoderDatasourceWithJoins; class EncoderAdditionalDatasource; + void encodeJSON(int idx, WrSerializer &ser, Iterator::NsNamesCache &) const; public: - void encodeJSON(int idx, WrSerializer &ser) const; ItemRefVector 
items_; std::optional activityCtx_; friend InternalRdxContext; diff --git a/cpp_src/core/reindexer_impl/rx_selector.cc b/cpp_src/core/reindexer_impl/rx_selector.cc index 0f688821f..c6eb63a6f 100644 --- a/cpp_src/core/reindexer_impl/rx_selector.cc +++ b/cpp_src/core/reindexer_impl/rx_selector.cc @@ -2,6 +2,7 @@ #include "core/nsselecter/nsselecter.h" #include "core/nsselecter/querypreprocessor.h" #include "core/queryresults/joinresults.h" +#include "estl/charset.h" #include "estl/restricted.h" #include "tools/logger.h" @@ -191,10 +192,10 @@ void RxSelector::DoSelect(const Query& q, QueryResults& result, NsLocker& loc } [[nodiscard]] static bool byJoinedField(std::string_view sortExpr, std::string_view joinedNs) { - static const fast_hash_set allowedSymbolsInIndexName{ - 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', - 'w', 'x', 'y', 'z', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', - 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '_', '.', '+'}; + constexpr static estl::Charset kJoinedIndexNameSyms{'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', + 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', + 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', + 'Z', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '_', '.', '+'}; std::string_view::size_type i = 0; const auto s = sortExpr.size(); while (i < s && isspace(sortExpr[i])) ++i; @@ -210,7 +211,7 @@ void RxSelector::DoSelect(const Query& q, QueryResults& result, NsLocker& loc } if (i >= s || sortExpr[i] != '.') return false; for (++i; i < s; ++i) { - if (allowedSymbolsInIndexName.find(sortExpr[i]) == allowedSymbolsInIndexName.end()) { + if (!kJoinedIndexNameSyms.test(sortExpr[i])) { if (isspace(sortExpr[i])) break; if (inQuotes && sortExpr[i] == '"') { inQuotes = false; diff --git a/cpp_src/core/rollback.h b/cpp_src/core/rollback.h index df61f6633..aa6d867df 100644 --- a/cpp_src/core/rollback.h +++ b/cpp_src/core/rollback.h @@ -13,7 +13,7 @@ class RollBackBase { RollBackBase &operator=(const RollBackBase &) = delete; RollBackBase &operator=(RollBackBase &&) = delete; virtual void Disable() noexcept { disabled_ = true; } - bool IsDisabled() const noexcept { return disabled_; } + [[nodiscard]] bool IsDisabled() const noexcept { return disabled_; } private: bool disabled_{false}; diff --git a/cpp_src/core/type_consts.h b/cpp_src/core/type_consts.h index 4f61cc00c..ef33bcf6a 100644 --- a/cpp_src/core/type_consts.h +++ b/cpp_src/core/type_consts.h @@ -14,6 +14,8 @@ typedef enum TagType { TAG_UUID = 8, } TagType; +static const uint8_t kMaxTagType = TAG_UUID; + typedef enum IndexType { IndexStrHash = 0, IndexStrBTree = 1, diff --git a/cpp_src/coroutine/channel.h b/cpp_src/coroutine/channel.h index 699dc12ba..d436afe72 100644 --- a/cpp_src/coroutine/channel.h +++ b/cpp_src/coroutine/channel.h @@ -3,6 +3,8 @@ #include "coroutine.h" #include "estl/h_vector.h" +#include + namespace reindexer { namespace coroutine { @@ -29,7 +31,7 @@ class channel { /// @param obj - Object to push template void push(U &&obj) { - assertrx(current()); // For now channels should not be used from main routine dew to current resume/suspend logic + assertrx(current()); // For now channels should not be used from main routine dew to current resume/suspend logic bool await = false; while (full() || closed_) { if (closed_) { @@ 
-62,7 +64,7 @@ class channel { /// writers. /// @return Pair of value and flag. Flag shows if it's actual value from channel (true) or default constructed one (false) std::pair pop() { - assertrx(current()); // For now channels should not be used from main routine dew to current resume/suspend logic + assertrx(current()); // For now channels should not be used from main routine dew to current resume/suspend logic bool await = false; while (empty() && !closed_) { if (!await) { diff --git a/cpp_src/coroutine/coroutine.cc b/cpp_src/coroutine/coroutine.cc index 411d2e5da..fc2aa749f 100644 --- a/cpp_src/coroutine/coroutine.cc +++ b/cpp_src/coroutine/coroutine.cc @@ -2,6 +2,7 @@ #include #include #include +#include #include "tools/clock.h" namespace reindexer { diff --git a/cpp_src/doc/CMakeLists.txt b/cpp_src/doc/CMakeLists.txt index 359b8c725..2a4daa3dd 100644 --- a/cpp_src/doc/CMakeLists.txt +++ b/cpp_src/doc/CMakeLists.txt @@ -14,12 +14,12 @@ if(DOXYGEN_FOUND) COMMAND ${python3} ${Dox2html} ${PROJECT_SOURCE_DIR}/Doxyfile-mcss COMMENT "Generating Reindexer documentation with Doxygen and Dox2html" ) - else () + else() set(doxyfile ${PROJECT_SOURCE_DIR}/Doxyfile) add_custom_target(doc WORKING_DIRECTORY ${PROJECT_SOURCE_DIR} COMMAND sh -c "DOXYGEN_OUTPUT_DIRECTORY=${PROJECT_BINARY_DIR} ${DOXYGEN_EXECUTABLE} ${doxyfile}" COMMENT "Generating Reindexer documentation with Doxygen" ) - endif () + endif() endif() diff --git a/cpp_src/estl/charset.h b/cpp_src/estl/charset.h new file mode 100644 index 000000000..b10cf80ef --- /dev/null +++ b/cpp_src/estl/charset.h @@ -0,0 +1,43 @@ +#pragma once + +#include +#include + +namespace reindexer::estl { + +class Charset { +public: + constexpr Charset(std::initializer_list list) { + for (const auto& c : list) { + set(c); + } + } + constexpr bool test(uint8_t pos) const noexcept { return getword(pos) & maskbit(pos); } + constexpr Charset& set(uint8_t pos, bool val = true) noexcept { + if (val) { + this->getword(pos) |= maskbit(pos); + } else { + this->getword(pos) &= ~maskbit(pos); + } + return *this; + } + constexpr static size_t max_values_count() noexcept { return kWordsCount * kBitsPerWord; } + +private: + using WordT = uint64_t; + constexpr static size_t kBitsPerWord = 64; + constexpr static size_t kWordsCount = 4; + + static constexpr uint8_t whichword(uint8_t pos) noexcept { return pos / kBitsPerWord; } + static constexpr WordT maskbit(uint8_t pos) noexcept { return WordT(1) << whichbit(pos); } + static constexpr size_t whichbit(uint8_t pos) noexcept { return pos % kBitsPerWord; } + constexpr WordT getword(uint8_t pos) const noexcept { return set_[whichword(pos)]; } + constexpr WordT& getword(uint8_t pos) noexcept { return set_[whichword(pos)]; } + + WordT set_[kWordsCount] = {0}; +}; + +static_assert(std::numeric_limits::max() - std::numeric_limits::min() + 1 == Charset::max_values_count(), + "Expecting max uint8_t range of [0, 255] for the simplicity"); + +} // namespace reindexer::estl diff --git a/cpp_src/gtests/tests/API/base_tests.cc b/cpp_src/gtests/tests/API/base_tests.cc index 77f3ee314..eda972e39 100644 --- a/cpp_src/gtests/tests/API/base_tests.cc +++ b/cpp_src/gtests/tests/API/base_tests.cc @@ -1919,7 +1919,6 @@ TEST_F(ReindexerApi, UpdateDoublesItemByPKIndex) { }; constexpr size_t kItemsCount = 4; std::vector data; - std::string checkUuid; for (unsigned i = 0; i < kItemsCount; i++) { Item item(rt.reindexer->NewItem(default_namespace)); ASSERT_TRUE(!!item); diff --git a/cpp_src/gtests/tests/CMakeLists.txt b/cpp_src/gtests/tests/CMakeLists.txt 
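A short usage sketch for the new estl::Charset above: construction and membership tests are fully constexpr, so the symbol tables in sortexpression.cc and rx_selector.cc cost nothing at startup (include path and namespace taken from this diff; the digit set is toy data):

```cpp
#include <cassert>
#include "estl/charset.h"

int main() {
	constexpr reindexer::estl::Charset digits{'0', '1', '2', '3', '4', '5', '6', '7', '8', '9'};
	static_assert(digits.test('7'), "membership checks can run at compile time");
	static_assert(!digits.test('a'), "non-members are rejected");
	// 4 words x 64 bits cover the full uint8_t range, per the static_assert in charset.h.
	assert(reindexer::estl::Charset::max_values_count() == 256);
}
```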
index a124db0ac..cb172998d 100644 --- a/cpp_src/gtests/tests/CMakeLists.txt +++ b/cpp_src/gtests/tests/CMakeLists.txt @@ -33,11 +33,11 @@ if (ENABLE_GRPC) endforeach (CUR_PROTO_FILE) add_definitions(-DWITH_PROTOBUF=1) include_directories(${REINDEXER_BINARY_PATH}/server/grpc ${GENERATED_PROTO_DIR}) - else () + else() message("Protobuf not found") - endif () + endif() -endif () +endif() file(GLOB_RECURSE SRCS *.cc *.h ${GENERATED_PROTO_DIR}/*.cc) @@ -56,35 +56,35 @@ target_link_libraries(${FUZZING_TARGET} ${REINDEXER_LIBRARIES} ${GTEST_LIBRARIES if (ENABLE_GRPC) add_definitions(-DWITH_GRPC) target_link_libraries(${TARGET} reindexer_grpc_library) -endif () +endif() set(GTEST_TIMEOUT 600) if (WITH_ASAN OR WITH_TSAN OR WITH_STDLIB_DEBUG) set(GTEST_TIMEOUT 2000) -endif () +endif() find_program(GTEST_PARALLEL "gtest-parallel") if (GTEST_PARALLEL) if (XML_REPORT) add_test(NAME gtests COMMAND gtest-parallel --gtest_color=no --serialize_test_cases --print_test_times --timeout ${GTEST_TIMEOUT} --gtest_output=xml:${REINDEXER_SOURCE_PATH}/testReport.xml "./${TARGET}") - else () + else() add_test(NAME gtests COMMAND gtest-parallel --serialize_test_cases --print_test_times --timeout ${GTEST_TIMEOUT} "./${TARGET}") - endif () -else () + endif() +else() add_test(NAME gtests COMMAND ${TARGET} --gtest_color=yes) -endif () +endif() if (WITH_TSAN) set_tests_properties(gtests PROPERTIES ENVIRONMENT TSAN_OPTIONS=suppressions=${REINDEXER_SOURCE_PATH}/gtests/tsan.suppressions) -endif () +endif() add_definitions(-DREINDEXER_TESTS_DATA_PATH="${REINDEXER_SOURCE_PATH}/gtests/tests_data") if (ENABLE_SERVER_AS_PROCESS_IN_TEST) if (CMAKE_SYSTEM_NAME MATCHES "Linux") add_definitions(-DREINDEXER_WITH_SC_AS_PROCESS) add_definitions(-DREINDEXER_SERVER_PATH="$") - else () + else() message(WARNING "\nENABLE_SERVER_AS_PROCESS_IN_TEST is set but the OS is not linux. 
     message(WARNING "\nENABLE_SERVER_AS_PROCESS_IN_TEST is set but the OS is not linux. Option disabled!\n")
-  endif ()
-endif ()
+  endif()
+endif()
diff --git a/cpp_src/gtests/tests/fixtures/grpcclient_api.h b/cpp_src/gtests/tests/fixtures/grpcclient_api.h
index 3408cc05f..d8ce17632 100644
--- a/cpp_src/gtests/tests/fixtures/grpcclient_api.h
+++ b/cpp_src/gtests/tests/fixtures/grpcclient_api.h
@@ -10,6 +10,7 @@
 #include "core/cjson/cjsondecoder.h"
 #include "core/cjson/jsonbuilder.h"
 #include "core/payload/payloadiface.h"
+#include "server/server.h"
 #include "reindexer_api.h"
 #include "tools/fsops.h"
 #include "yaml-cpp/yaml.h"
diff --git a/cpp_src/gtests/tests/fixtures/ns_api.h b/cpp_src/gtests/tests/fixtures/ns_api.h
index 49c40f3ff..5f7313d7f 100644
--- a/cpp_src/gtests/tests/fixtures/ns_api.h
+++ b/cpp_src/gtests/tests/fixtures/ns_api.h
@@ -79,7 +79,7 @@ class NsApi : public ReindexerApi {
 		}
 	}

-	void AddHeterogeniousNestedData() {
+	void AddHeterogeneousNestedData() {
 		char sourceJson[4096];
 		const char jsonPattern[] =
 			R"json({
diff --git a/cpp_src/gtests/tests/fixtures/queries_verifier.h b/cpp_src/gtests/tests/fixtures/queries_verifier.h
index 2f0ff760a..bdc231aa5 100644
--- a/cpp_src/gtests/tests/fixtures/queries_verifier.h
+++ b/cpp_src/gtests/tests/fixtures/queries_verifier.h
@@ -145,7 +145,7 @@ class QueriesVerifier : public virtual ::testing::Test {
 			if (!conditionsSatisfied) {
 				std::stringstream ss;
 				ss << "Item doesn't match conditions: " << itemr.GetJSON() << std::endl;
-				const auto& jit = qr[i].GetJoined();
+				const auto jit = qr[i].GetJoined();
 				if (jit.getJoinedItemsCount() > 0) {
 					ss << "Joined:" << std::endl;
 					for (int fIdx = 0, fCount = jit.getJoinedFieldsCount(); fIdx < fCount; ++fIdx) {
diff --git a/cpp_src/gtests/tests/fixtures/reindexer_api.h b/cpp_src/gtests/tests/fixtures/reindexer_api.h
index 747a2f443..70dbc4b5b 100644
--- a/cpp_src/gtests/tests/fixtures/reindexer_api.h
+++ b/cpp_src/gtests/tests/fixtures/reindexer_api.h
@@ -2,7 +2,6 @@

 #include "core/reindexer.h"
 #include "reindexertestapi.h"
-#include "servercontrol.h"

 using reindexer::Error;
 using reindexer::Item;
@@ -25,12 +24,10 @@ class ReindexerApi : public virtual ::testing::Test {
 public:
 	ReindexerApi() = default;

-	void DefineNamespaceDataset(const std::string &ns, std::initializer_list fields) {
-		rt.DefineNamespaceDataset(ns, fields);
-	}
-	void DefineNamespaceDataset(const std::string &ns, const std::vector &fields) {
+	void DefineNamespaceDataset(std::string_view ns, std::initializer_list fields) { rt.DefineNamespaceDataset(ns, fields); }
+	void DefineNamespaceDataset(std::string_view ns, const std::vector &fields) { rt.DefineNamespaceDataset(ns, fields); }
 	void DefineNamespaceDataset(Reindexer &rx, const std::string &ns, std::initializer_list fields) {
 		rt.DefineNamespaceDataset(rx, ns, fields);
 	}
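The DefineNamespaceDataset overloads above switch from const std::string& to std::string_view, which lets callers pass string literals and substrings without materializing a std::string at the call site. A self-contained sketch of the same pattern (names here are illustrative, not from the fixture):

#include <iostream>
#include <string>
#include <string_view>

// A std::string_view parameter accepts literals, std::string and substrings
// alike, with no allocation at the call site.
static void defineDataset(std::string_view ns) { std::cout << "defining: " << ns << '\n'; }

int main() {
	std::string name = "test_namespace_v2";
	defineDataset("test_namespace");                      // literal, no std::string temporary
	defineDataset(name);                                  // implicit conversion from std::string
	defineDataset(std::string_view(name).substr(0, 14));  // substring without copying
}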
diff --git a/cpp_src/gtests/tests/fixtures/replication_api.cc b/cpp_src/gtests/tests/fixtures/replication_api.cc
index abfc31b5b..4c48a9677 100644
--- a/cpp_src/gtests/tests/fixtures/replication_api.cc
+++ b/cpp_src/gtests/tests/fixtures/replication_api.cc
@@ -58,7 +58,7 @@ void ReplicationApi::RestartServer(size_t id) {
 void ReplicationApi::WaitSync(const std::string& ns) {
 	auto now = std::chrono::milliseconds(0);
 	const auto pause = std::chrono::milliseconds(10);
-	ReplicationStateApi state{lsn_t(), lsn_t(), 0, 0, false};
+	ReplicationStateApi state{reindexer::lsn_t(), reindexer::lsn_t(), 0, 0, false};
 	while (state.lsn.isEmpty()) {
 		now += pause;
 		ASSERT_TRUE(now < kMaxSyncTime);
@@ -67,7 +67,7 @@
 		if (i != masterId_) {
 			state = GetSrv(i)->GetState(ns);
 			if (xstate.lsn != state.lsn) {
-				state.lsn = lsn_t();
+				state.lsn = reindexer::lsn_t();
 				break;
 			} else if (!state.lsn.isEmpty()) {
 				ASSERT_EQ(state.dataHash, xstate.dataHash) << "name: " << ns << ", lsns: " << int64_t(state.lsn) << " "
diff --git a/cpp_src/gtests/tests/fixtures/replication_api.h b/cpp_src/gtests/tests/fixtures/replication_api.h
index 19e2cf9ce..d8e5e9924 100644
--- a/cpp_src/gtests/tests/fixtures/replication_api.h
+++ b/cpp_src/gtests/tests/fixtures/replication_api.h
@@ -1,15 +1,8 @@
 #pragma once

 #include
-#include "debug/backtrace.h"
-#include "estl/fast_hash_set.h"
-#include "reindexer_api.h"
-#include "reindexertestapi.h"
-#include "server/dbmanager.h"
-#include "server/server.h"
-#include "thread"
-#include "tools/logger.h"
-#include "tools/serializer.h"
+#include "estl/shared_mutex.h"
+#include "servercontrol.h"

 using namespace reindexer_server;

@@ -50,7 +43,7 @@ class ReplicationApi : public ::testing::Test {
 	void SetOptmizationSortWorkers(size_t id, size_t cnt, std::string_view nsName);

 	size_t masterId_ = 0;
-	shared_timed_mutex restartMutex_;
+	reindexer::shared_timed_mutex restartMutex_;

 private:
 	const std::string kStoragePath = reindexer::fs::JoinPath(reindexer::fs::GetTempDir(), "reindex_repl_test/");
diff --git a/cpp_src/gtests/tests/fixtures/replication_load_api.h b/cpp_src/gtests/tests/fixtures/replication_load_api.h
index 1a43953d0..e21b72cf4 100644
--- a/cpp_src/gtests/tests/fixtures/replication_load_api.h
+++ b/cpp_src/gtests/tests/fixtures/replication_load_api.h
@@ -7,9 +7,9 @@

 class ReplicationLoadApi : public ReplicationApi {
 public:
-	class UpdatesReciever : public IUpdatesObserver {
+	class UpdatesReciever : public reindexer::IUpdatesObserver {
 	public:
-		void OnWALUpdate(LSNPair, std::string_view nsName, const WALRecord &) override final {
+		void OnWALUpdate(reindexer::LSNPair, std::string_view nsName, const reindexer::WALRecord &) override final {
 			std::lock_guard lck(mtx_);
 			auto found = updatesCounters_.find(nsName);
 			if (found != updatesCounters_.end()) {
@@ -21,7 +21,7 @@ class ReplicationLoadApi : public ReplicationApi {
 		void OnConnectionState(const Error &) override final {}
 		void OnUpdatesLost(std::string_view) override final {}

-		using map = tsl::hopscotch_map;
+		using map = tsl::hopscotch_map;

 		map Counters() const {
 			std::lock_guard lck(mtx_);
@@ -77,7 +77,7 @@ class ReplicationLoadApi : public ReplicationApi {
 		auto srv = GetSrv(masterId_);
 		auto &api = srv->api;

-		shared_lock lk(restartMutex_);
+		reindexer::shared_lock lk(restartMutex_);

 		for (size_t i = 0; i < count; ++i) {
 			BaseApi::ItemType item = api.NewItem("some");
@@ -96,7 +96,7 @@ class ReplicationLoadApi : public ReplicationApi {
 		}
 	}
 	BaseApi::QueryResultsType SimpleSelect(size_t num) {
-		Query qr = Query("some");
+		reindexer::Query qr("some");
 		auto srv = GetSrv(num);
 		auto &api = srv->api;
 		BaseApi::QueryResultsType res(api.reindexer.get());
@@ -109,7 +109,7 @@ class ReplicationLoadApi : public ReplicationApi {
 		auto srv = GetSrv(masterId_);
 		auto &api = srv->api;
 		BaseApi::QueryResultsType res(api.reindexer.get());
-		auto err = api.reindexer->Delete(Query("some"), res);
+		auto err = api.reindexer->Delete(reindexer::Query("some"), res);
 		EXPECT_TRUE(err.ok()) << err.what();
 		return res;
 	}
@@ -152,7 +152,7 @@ class ReplicationLoadApi : public ReplicationApi {
 		auto srv = GetSrv(i);
 		auto &api = srv->api;
 		BaseApi::QueryResultsType res(api.reindexer.get());
-		auto err = api.reindexer->Select(Query(ns), res);
+		auto err = api.reindexer->Select(reindexer::Query(ns), res);
 		EXPECT_TRUE(err.ok()) << err.what();
 		versions.emplace_back(res.getTagsMatcher(0).version());
 	}
@@ -180,8 +180,8 @@ class ReplicationLoadApi : public ReplicationApi {
 	void ValidateSchemas(std::string_view ns, std::string_view expected) {
 		for (size_t i = 0; i < GetServersCount(); i++) {
 			auto srv = GetSrv(i);
-			std::vector nsDefs;
-			auto err = srv->api.reindexer->EnumNamespaces(nsDefs, EnumNamespacesOpts().WithFilter(ns));
+			std::vector nsDefs;
+			auto err = srv->api.reindexer->EnumNamespaces(nsDefs, reindexer::EnumNamespacesOpts().WithFilter(ns));
 			EXPECT_TRUE(err.ok()) << err.what();
 			ASSERT_EQ(nsDefs.size(), 1) << "Namespace does not exist: " << ns;
 			EXPECT_EQ(nsDefs[0].name, ns);
diff --git a/cpp_src/gtests/tests/unit/composite_indexes_test.cc b/cpp_src/gtests/tests/unit/composite_indexes_test.cc
index 93cdd4dd5..baefff01d 100644
--- a/cpp_src/gtests/tests/unit/composite_indexes_test.cc
+++ b/cpp_src/gtests/tests/unit/composite_indexes_test.cc
@@ -220,7 +220,6 @@ TEST_F(CompositeIndexesApi, SelectsBySubIndexes) {
 		err = rt.reindexer->OpenNamespace(default_namespace);
 		ASSERT_TRUE(err.ok()) << c.name;
 		DefineNamespaceDataset(default_namespace, c.idxs);
-		std::string compositeIndexName(getCompositeIndexName({kFieldNamePrice, kFieldNamePages}));
 		addCompositeIndex({kFieldNamePrice, kFieldNamePages}, CompositeIndexHash, IndexOpts());

 		int priceValue = 77777, pagesValue = 88888, bookid = 300;
diff --git a/cpp_src/gtests/tests/unit/equalposition_tests.cc b/cpp_src/gtests/tests/unit/equalposition_tests.cc
index f3c46380e..4e0e884b1 100644
--- a/cpp_src/gtests/tests/unit/equalposition_tests.cc
+++ b/cpp_src/gtests/tests/unit/equalposition_tests.cc
@@ -232,8 +232,6 @@ TEST_F(EqualPositionApi, EmptyCompOpErr) {
 	EXPECT_TRUE(item.Status().ok()) << item.Status().what();

 	char json[1024];
-	std::string pk("pk" + std::to_string(i));
-
 	snprintf(json, sizeof(json) - 1, jsonPattern, i);
 	err = item.FromJSON(json);
diff --git a/cpp_src/gtests/tests/unit/join_test.cc b/cpp_src/gtests/tests/unit/join_test.cc
index 13dee5c00..f143594cc 100644
--- a/cpp_src/gtests/tests/unit/join_test.cc
+++ b/cpp_src/gtests/tests/unit/join_test.cc
@@ -896,3 +896,138 @@ TEST_F(JoinOnConditionsApi, TestInvalidConditions) {
 	err = rt.reindexer->Select(Query(books_namespace).InnerJoin(authorid_fk, authorid, CondLike, Query(authors_namespace)), qr);
 	EXPECT_FALSE(err.ok());
 }
+
+void CheckJoinIds(std::map<int, std::vector<std::set<int>>> ids, const QueryResults& qr) {
+	ASSERT_EQ(ids.size(), qr.Count());
+	for (auto it : qr) {
+		{
+			const auto item = it.GetItem();
+			const int id = item["id"].Get<int>();
+			const auto idIt = ids.find(id);
+			ASSERT_NE(idIt, ids.end()) << id;
+
+			const auto joined = it.GetJoined();
+			const auto& joinedIds = idIt->second;
+			ASSERT_EQ(joinedIds.size(), joined.getJoinedFieldsCount());
+			for (size_t i = 0; i < joinedIds.size(); ++i) {
+				const auto& joinedItems = joined.at(i);
+				const auto& joinedIdsSet = joinedIds[i];
+				ASSERT_EQ(joinedIds[i].size(), joinedItems.ItemsCount());
+				for (size_t j = 0; j < joinedIdsSet.size(); ++j) {
+					const auto nsId = joinedItems[j].Nsid();
+					auto itemImpl = joined.at(i).GetItem(j, qr.getPayloadType(nsId), qr.getTagsMatcher(nsId));
+					const int joinedId = Item::FieldRefByName("id", itemImpl).Get<int>();
+					EXPECT_NE(joinedIdsSet.find(joinedId), joinedIdsSet.end()) << joinedId;
+				}
+			}
+		}
+
+		{
+			reindexer::WrSerializer ser;
+			const auto err = it.GetJSON(ser, false);
+			ASSERT_TRUE(err.ok()) << err.what();
+			gason::JsonParser parser;
+			const auto mainNode = parser.Parse(ser.Slice());
+			const int id = mainNode["id"].As<int>();
+			const auto idIt = ids.find(id);
+			ASSERT_NE(idIt, ids.end()) << id;
+
+			for (size_t i = 0, s = idIt->second.size(); i < s; ++i) {
+				auto& joinedIds = idIt->second[i];
+				const std::string joinedFieldName = "joined_" + (idIt->second.size() == 1 ? "" : std::to_string(i + 1) + '_') + "join_ns";
+				size_t found = 0;
+				for (const auto joinedNode : mainNode[joinedFieldName]) {
+					const int joinedId = joinedNode["id"].As<int>();
+					const auto joinedIdIt = joinedIds.find(joinedId);
+					EXPECT_NE(joinedIdIt, joinedIds.end()) << joinedFieldName << ' ' << joinedId;
+					found += (joinedIdIt != joinedIds.end());
+				}
+				EXPECT_EQ(joinedIds.size(), found) << joinedFieldName;
+			}
+		}
+	}
+}
+
+TEST_F(JoinSelectsApi, SeveralJoinsByTheSameNs) {
+	const std::string_view mainNs = "main_ns";
+	const std::string_view joinNs = "join_ns";
+	Error err = rt.reindexer->OpenNamespace(mainNs);
+	ASSERT_TRUE(err.ok()) << err.what();
+	DefineNamespaceDataset(mainNs, {IndexDeclaration{"id", "hash", "int", IndexOpts().PK(), 0}});
+
+	{
+		Item mainItem = NewItem(mainNs);
+		err = mainItem.FromJSON(R"({"id": 0, "join_id": 2})");
+		ASSERT_TRUE(err.ok()) << err.what();
+		Upsert(mainNs, mainItem);
+	}
+
+	{
+		Item mainItem = NewItem(mainNs);
+		err = mainItem.FromJSON(R"({"id": 1, "join_id": 3})");
+		ASSERT_TRUE(err.ok()) << err.what();
+		Upsert(mainNs, mainItem);
+	}
+
+	err = rt.reindexer->OpenNamespace(joinNs);
+	ASSERT_TRUE(err.ok()) << err.what();
+	DefineNamespaceDataset(joinNs, {IndexDeclaration{"id", "hash", "int", IndexOpts().PK(), 0}});
+
+	{
+		Item joinItem = NewItem(joinNs);
+		joinItem["id"] = 0;
+		Upsert(joinNs, joinItem);
+	}
+
+	{
+		Item joinItem = NewItem(joinNs);
+		joinItem["id"] = 1;
+		Upsert(joinNs, joinItem);
+	}
+
+	{
+		Item joinItem = NewItem(joinNs);
+		joinItem["id"] = 2;
+		Upsert(joinNs, joinItem);
+	}
+
+	{
+		Item joinItem = NewItem(joinNs);
+		joinItem["id"] = 3;
+		Upsert(joinNs, joinItem);
+	}
+
+	{
+		QueryResults qr;
+		err = rt.reindexer->Select(Query(mainNs).InnerJoin("id", "id", CondEq, Query(joinNs)), qr);
+		ASSERT_TRUE(err.ok()) << err.what();
+		CheckJoinIds({{0, {{0}}}, {1, {{1}}}}, qr);
+	}
+
+	{
+		QueryResults qr;
+		err = rt.reindexer->Select(
+			Query(mainNs).InnerJoin("id", "id", CondEq, Query(joinNs)).LeftJoin("join_id", "id", CondEq, Query(joinNs)), qr);
+		ASSERT_TRUE(err.ok()) << err.what();
+		CheckJoinIds({{0, {{0}, {2}}}, {1, {{1}, {3}}}}, qr);
+	}
+
+	{
+		QueryResults qr;
+		err = rt.reindexer->Select(
+			Query(mainNs).InnerJoin("id", "id", CondEq, Query(joinNs)).LeftJoin("join_id", "id", CondGe, Query(joinNs)), qr);
+		ASSERT_TRUE(err.ok()) << err.what();
+		CheckJoinIds({{0, {{0}, {0, 1, 2}}}, {1, {{1}, {0, 1, 2, 3}}}}, qr);
+	}
+
+	{
+		QueryResults qr;
+		err = rt.reindexer->Select(Query(mainNs)
									   .InnerJoin("id", "id", CondEq, Query(joinNs))
									   .LeftJoin("join_id", "id", CondGe, Query(joinNs))
									   .LeftJoin("join_id", "id", CondEq, Query(joinNs)),
								   qr);
+		ASSERT_TRUE(err.ok()) << err.what();
+		CheckJoinIds({{0, {{0}, {0, 1, 2}, {2}}}, {1, {{1}, {0, 1, 2, 3}, {3}}}}, qr);
+	}
+}
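CheckJoinIds above also documents, implicitly, how joined sub-objects are named in the JSON output these tests verify: with a single join the field is just "joined_join_ns", while repeated joins of the same namespace get a 1-based ordinal ("joined_2_join_ns", "joined_3_join_ns", ...). A small standalone sketch mirroring exactly the naming expression used in the test (the helper name is illustrative):

#include <iostream>
#include <string>

// Mirrors the expression in CheckJoinIds: when the query has one join the
// plain name is used; with several joins, join i (0-based) gets suffix i+1.
static std::string joinedFieldName(size_t joinIdx, size_t totalJoins, const std::string& ns) {
	return "joined_" + (totalJoins == 1 ? std::string() : std::to_string(joinIdx + 1) + "_") + ns;
}

int main() {
	std::cout << joinedFieldName(0, 1, "join_ns") << '\n';  // joined_join_ns
	std::cout << joinedFieldName(1, 3, "join_ns") << '\n';  // joined_2_join_ns
	std::cout << joinedFieldName(2, 3, "join_ns") << '\n';  // joined_3_join_ns
}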
diff --git a/cpp_src/gtests/tests/unit/namespace_test.cc b/cpp_src/gtests/tests/unit/namespace_test.cc
index cefc09fb1..712c40ae2 100644
--- a/cpp_src/gtests/tests/unit/namespace_test.cc
+++ b/cpp_src/gtests/tests/unit/namespace_test.cc
@@ -710,9 +710,11 @@ TEST_F(NsApi, TestAddAndSetNonindexedField3) {
 	addAndSetNonindexedField(rt.reindexer, default_namespace, "nested3.nested4.extrabonus");
 }

-static void setAndCheckArrayItem(const std::shared_ptr &reindexer, const std::string &ns,
-								 const std::string &fullItemPath, const std::string &jsonPath, int i = IndexValueType::NotSet,
+static void setAndCheckArrayItem(const std::shared_ptr &reindexer, std::string_view ns, std::string_view fullItemPath,
+								 std::string_view jsonPath, std::string_view description, int i = IndexValueType::NotSet,
 								 int j = IndexValueType::NotSet) {
+	SCOPED_TRACE(description);
+
 	// Set array item to 777
 	QueryResults qrUpdate;
 	Query updateQuery{Query(ns).Where("nested.bonus", CondGe, Variant(500)).Set(fullItemPath, static_cast<int64_t>(777))};
@@ -728,7 +730,8 @@ static void setAndCheckArrayItem(const std::shared_ptr &re

 	// Check if array item with appropriate index equals to 777 and
 	// is a type of Int64.
-	auto checkItem = [](const VariantArray &values, size_t index) {
+	auto checkItem = [](const VariantArray &values, size_t index, std::string_view description) {
+		SCOPED_TRACE(description);
 		ASSERT_TRUE(index < values.size());
 		ASSERT_TRUE(values[index].Type().Is<reindexer::KeyValueType::Int64>());
 		ASSERT_TRUE(values[index].As<int64_t>() == 777);
@@ -742,21 +745,21 @@ static void setAndCheckArrayItem(const std::shared_ptr &re
 	for (auto it : qrAll) {
 		Item item = it.GetItem(false);
 		checkIfItemJSONValid(it);
-		VariantArray values = item[jsonPath.c_str()];
+		VariantArray values = item[jsonPath];
 		if (i == j && i == IndexValueType::NotSet) {
-			for (size_t i = 0; i < values.size(); ++i) {
-				checkItem(values, i);
+			for (size_t k = 0; k < values.size(); ++k) {
+				checkItem(values, k, description);
 			}
 		} else if (i == IndexValueType::NotSet) {
 			for (int k = 0; k < kPricesSize; ++k) {
-				checkItem(values, k * kPricesSize + j);
+				checkItem(values, k * kPricesSize + j, description);
 			}
 		} else if (j == IndexValueType::NotSet) {
 			for (int k = 0; k < kPricesSize; ++k) {
-				checkItem(values, i * kPricesSize + k);
+				checkItem(values, i * kPricesSize + k, description);
 			}
 		} else {
-			checkItem(values, i * kPricesSize + j);
+			checkItem(values, i * kPricesSize + j, description);
 		}
 	}
 }
@@ -767,11 +770,14 @@ TEST_F(NsApi, TestAddAndSetArrayField) {
 	// 3. Set array item(s) value to 777 and check if it was set properly
 	DefineDefaultNamespace();
 	AddUnindexedData();
-	setAndCheckArrayItem(rt.reindexer, default_namespace, "nested.nested_array[0].prices[2]", "nested.nested_array.prices", 0, 2);
-	setAndCheckArrayItem(rt.reindexer, default_namespace, "nested.nested_array[2].nested.array[1]", "nested.nested_array.nested.array", 0,
-						 1);
-	setAndCheckArrayItem(rt.reindexer, default_namespace, "nested.nested_array[2].nested.array[*]", "nested.nested_array.nested.array", 0);
-	setAndCheckArrayItem(rt.reindexer, default_namespace, "nested.nested_array[1].prices[*]", "nested.nested_array.prices", 1);
+	setAndCheckArrayItem(rt.reindexer, default_namespace, "nested.nested_array[0].prices[2]", "nested.nested_array.prices",
+						 "TestAddAndSetArrayField 1 ", 0, 2);
+	setAndCheckArrayItem(rt.reindexer, default_namespace, "nested.nested_array[2].nested.array[1]", "nested.nested_array.nested.array",
+						 "TestAddAndSetArrayField 2 ", 0, 1);
+	setAndCheckArrayItem(rt.reindexer, default_namespace, "nested.nested_array[2].nested.array[*]", "nested.nested_array.nested.array",
+						 "TestAddAndSetArrayField 3 ", 0);
+	setAndCheckArrayItem(rt.reindexer, default_namespace, "nested.nested_array[1].prices[*]", "nested.nested_array.prices",
+						 "TestAddAndSetArrayField 4 ", 1);
 }

 TEST_F(NsApi, TestAddAndSetArrayField2) {
@@ -781,8 +787,9 @@ TEST_F(NsApi, TestAddAndSetArrayField2) {
 	DefineDefaultNamespace();
 	AddUnindexedData();
 	setAndCheckArrayItem(rt.reindexer, default_namespace, "nested.nested_array[*].prices[0]", "nested.nested_array.prices",
-						 IndexValueType::NotSet, 0);
-	setAndCheckArrayItem(rt.reindexer, default_namespace, "nested.nested_array[*].name", "nested.nested_array.name");
+						 "TestAddAndSetArrayField2 1 ", IndexValueType::NotSet, 0);
+	setAndCheckArrayItem(rt.reindexer, default_namespace, "nested.nested_array[*].name", "nested.nested_array.name",
+						 "TestAddAndSetArrayField2 2 ");
 }

 TEST_F(NsApi, TestAddAndSetArrayField3) {
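setAndCheckArrayItem is invoked several times per test, so the new description argument combined with gtest's SCOPED_TRACE is what ties a failing assertion back to the specific call site. A self-contained sketch of the mechanism (the test body is illustrative and assumes linking against gtest_main):

#include <gtest/gtest.h>
#include <string>
#include <string_view>

// Any ASSERT/EXPECT failure inside this helper is reported together with the
// active SCOPED_TRACE message, so repeated calls stay distinguishable.
static void checkValue(int value, std::string_view description) {
	SCOPED_TRACE(std::string(description));
	ASSERT_EQ(value, 777);
}

TEST(ScopedTraceSketch, RepeatedHelperCalls) {
	checkValue(777, "call 1");
	checkValue(777, "call 2");  // on failure, gtest would print "call 2" in the trace
}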
@@ -1056,48 +1063,59 @@ TEST_F(NsApi, ExtendArrayWithExpressions) {
 	}
 }

-static void validateResults(const std::shared_ptr &reindexer, const Query &baseQuery, std::string_view ns,
-							const QueryResults &qr, std::string_view pattern, std::string_view field, const VariantArray &expectedValues,
+static void validateResults(const std::shared_ptr &reindexer, const Query &baseQuery, const Query &testQuery,
+							std::string_view ns, std::string_view pattern, std::string_view field, const VariantArray &expectedValues,
 							std::string_view description, int resCount = 5) {
-	const std::string fullDescription = "Description: " + std::string(description) + ";\n";
+	SCOPED_TRACE(description);
+
+	QueryResults qr;
+	auto err = reindexer->Update(testQuery, qr);
+	ASSERT_TRUE(err.ok()) << err.what();
+
 	// Check initial result
-	ASSERT_EQ(qr.Count(), resCount) << fullDescription;
+	ASSERT_EQ(qr.Count(), resCount);
 	std::vector initialResults;
 	initialResults.reserve(qr.Count());
 	for (auto it : qr) {
 		Item item = it.GetItem(false);
 		checkIfItemJSONValid(it);
 		const auto json = item.GetJSON();
-		ASSERT_NE(json.find(pattern), std::string::npos) << fullDescription << "JSON: " << json << ";\npattern: " << pattern;
+		ASSERT_NE(json.find(pattern), std::string::npos) << "JSON: " << json << ";\npattern: " << pattern;
 		initialResults.emplace_back(json);

 		const VariantArray values = item[field];
-		ASSERT_EQ(values.size(), expectedValues.size()) << fullDescription;
-		ASSERT_EQ(values.IsArrayValue(), expectedValues.IsArrayValue()) << fullDescription;
+		ASSERT_EQ(values.size(), expectedValues.size());
+		ASSERT_EQ(values.IsArrayValue(), expectedValues.IsArrayValue());
 		for (size_t i = 0; i < values.size(); ++i) {
 			ASSERT_TRUE(values[i].Type().IsSame(expectedValues[i].Type()))
-				<< fullDescription << values[i].Type().Name() << "!=" << expectedValues[i].Type().Name();
-			ASSERT_EQ(values[i], expectedValues[i]) << fullDescription;
+				<< values[i].Type().Name() << "!=" << expectedValues[i].Type().Name();
+			if (values[i].Type().IsSame(reindexer::KeyValueType::Null())) {
+				continue;
+			}
+			ASSERT_EQ(values[i], expectedValues[i]);
 		}
 	}

 	// Check select results
 	QueryResults qrSelect;
 	const Query q = expectedValues.size() ? Query(ns).Where(std::string(field), CondAllSet, expectedValues) : baseQuery;
-	auto err = reindexer->Select(q, qrSelect);
-	ASSERT_TRUE(err.ok()) << fullDescription << err.what();
-	ASSERT_EQ(qrSelect.Count(), qr.Count()) << fullDescription;
+	err = reindexer->Select(q, qrSelect);
+	ASSERT_TRUE(err.ok()) << err.what();
+	ASSERT_EQ(qrSelect.Count(), qr.Count());
 	unsigned i = 0;
 	for (auto it : qrSelect) {
 		Item item = it.GetItem(false);
 		checkIfItemJSONValid(it);
 		const auto json = item.GetJSON();
-		ASSERT_EQ(json, initialResults[i++]) << fullDescription;
+		ASSERT_EQ(json, initialResults[i++]);

 		const VariantArray values = item[field];
-		ASSERT_EQ(values.size(), expectedValues.size()) << fullDescription;
-		ASSERT_EQ(values.IsArrayValue(), expectedValues.IsArrayValue()) << fullDescription;
+		ASSERT_EQ(values.size(), expectedValues.size());
+		ASSERT_EQ(values.IsArrayValue(), expectedValues.IsArrayValue());
 		for (size_t j = 0; j < values.size(); ++j) {
 			ASSERT_TRUE(values[j].Type().IsSame(expectedValues[j].Type()))
-				<< fullDescription << values[j].Type().Name() << "!=" << expectedValues[j].Type().Name();
-			ASSERT_EQ(values[j], expectedValues[j]) << fullDescription;
+				<< values[j].Type().Name() << "!=" << expectedValues[j].Type().Name();
+			if (values[j].Type().IsSame(reindexer::KeyValueType::Null())) {
+				continue;
+			}
+			ASSERT_EQ(values[j], expectedValues[j]);
 		}
 	}
 }

@@ -1109,64 +1127,39 @@ TEST_F(NsApi, ExtendEmptyArrayWithExpressions) {
 	const Query kBaseQuery = Query(kEmptyArraysNs).Where("id", CondSet, {100, 105, 189, 113, 153});

 	{
-		const auto description = "append value to the empty indexed array";
 		const Query query = Query(kBaseQuery).Set("indexed_array_field", Variant("indexed_array_field || [99, 99, 99]"), true);
-		QueryResults qr;
-		Error err = rt.reindexer->Update(query, qr);
-		ASSERT_TRUE(err.ok()) << err.what();
-		validateResults(rt.reindexer, kBaseQuery, kEmptyArraysNs, qr, R"("indexed_array_field":[99,99,99],"non_indexed_array_field":[])",
-						"indexed_array_field", {Variant(99), Variant(99), Variant(99)}, description);
+		validateResults(rt.reindexer, kBaseQuery, query, kEmptyArraysNs, R"("indexed_array_field":[99,99,99],"non_indexed_array_field":[])",
+						"indexed_array_field", {Variant(99), Variant(99), Variant(99)}, "append value to the empty indexed array");
 	}
 	{
-		const auto description = "append empty array to the indexed array";
 		const Query query = Query(kBaseQuery).Set("indexed_array_field", Variant("indexed_array_field || []"), true);
-		QueryResults qr;
-		Error err = rt.reindexer->Update(query, qr);
-		ASSERT_TRUE(err.ok()) << err.what();
-		validateResults(rt.reindexer, kBaseQuery, kEmptyArraysNs, qr, R"("indexed_array_field":[99,99,99],"non_indexed_array_field":[])",
-						"indexed_array_field", {Variant(99), Variant(99), Variant(99)}, description);
R"("indexed_array_field":[99,99,99],"non_indexed_array_field":[])", + "indexed_array_field", {Variant(99), Variant(99), Variant(99)}, "append empty array to the indexed array"); } { - const auto description = "append value to the empty non-indexed array"; const Query query = Query(kBaseQuery).Set("non_indexed_array_field", Variant("non_indexed_array_field || [88, 88]"), true); - QueryResults qr; - Error err = rt.reindexer->Update(query, qr); - ASSERT_TRUE(err.ok()) << err.what(); - validateResults(rt.reindexer, kBaseQuery, kEmptyArraysNs, qr, + validateResults(rt.reindexer, kBaseQuery, query, kEmptyArraysNs, R"("indexed_array_field":[99,99,99],"non_indexed_array_field":[88,88])", "non_indexed_array_field", - {Variant(int64_t(88)), Variant(int64_t(88))}, description); + {Variant(int64_t(88)), Variant(int64_t(88))}, "append value to the empty non-indexed array"); } { - const auto description = "append empty array to the non-indexed array"; const Query query = Query(kBaseQuery).Set("non_indexed_array_field", Variant("non_indexed_array_field || []"), true); - QueryResults qr; - Error err = rt.reindexer->Update(query, qr); - ASSERT_TRUE(err.ok()) << err.what(); - validateResults(rt.reindexer, kBaseQuery, kEmptyArraysNs, qr, + validateResults(rt.reindexer, kBaseQuery, query, kEmptyArraysNs, R"("indexed_array_field":[99,99,99],"non_indexed_array_field":[88,88])", "non_indexed_array_field", - {Variant(int64_t(88)), Variant(int64_t(88))}, description); + {Variant(int64_t(88)), Variant(int64_t(88))}, "append empty array to the non-indexed array"); } { - const auto description = "append empty array to the non-existing field"; const Query query = Query(kBaseQuery).Set("non_existing_field", Variant("non_existing_field || []"), true); - QueryResults qr; - Error err = rt.reindexer->Update(query, qr); - ASSERT_TRUE(err.ok()) << err.what(); - validateResults(rt.reindexer, kBaseQuery, kEmptyArraysNs, qr, + validateResults(rt.reindexer, kBaseQuery, query, kEmptyArraysNs, R"("indexed_array_field":[99,99,99],"non_indexed_array_field":[88,88],"non_existing_field":[])", - "non_existing_field", VariantArray().MarkArray(), description); + "non_existing_field", VariantArray().MarkArray(), "append empty array to the non-existing field"); } - { - const auto description = "append non-empty array to the non-existing field"; const Query query = Query(kBaseQuery).Set("non_existing_field1", Variant("non_existing_field1 || [546]"), true); - QueryResults qr; - Error err = rt.reindexer->Update(query, qr); - ASSERT_TRUE(err.ok()) << err.what(); validateResults( - rt.reindexer, kBaseQuery, kEmptyArraysNs, qr, + rt.reindexer, kBaseQuery, query, kEmptyArraysNs, R"("indexed_array_field":[99,99,99],"non_indexed_array_field":[88,88],"non_existing_field":[],"non_existing_field1":[546])", - "non_existing_field1", VariantArray{Variant(int64_t(546))}.MarkArray(), description); + "non_existing_field1", VariantArray{Variant(int64_t(546))}.MarkArray(), "append non-empty array to the non-existing field"); } } @@ -1184,79 +1177,47 @@ TEST_F(NsApi, ArrayRemove) { ASSERT_EQ(err.what(), "Only an array field is expected as first parameter of command 'array_remove_once/array_remove'"); } { - const auto description = "remove empty array from empty indexed array"; const Query query = Query(kBaseQuery).Set("indexed_array_field", Variant("array_remove(indexed_array_field, [])"), true); - QueryResults qr; - const auto err = rt.reindexer->Update(query, qr); - ASSERT_TRUE(err.ok()) << err.what(); - validateResults(rt.reindexer, kBaseQuery, 
@@ -1184,79 +1177,47 @@ TEST_F(NsApi, ArrayRemove) {
 	ASSERT_EQ(err.what(), "Only an array field is expected as first parameter of command 'array_remove_once/array_remove'");
 	}
 	{
-		const auto description = "remove empty array from empty indexed array";
 		const Query query = Query(kBaseQuery).Set("indexed_array_field", Variant("array_remove(indexed_array_field, [])"), true);
-		QueryResults qr;
-		const auto err = rt.reindexer->Update(query, qr);
-		ASSERT_TRUE(err.ok()) << err.what();
-		validateResults(rt.reindexer, kBaseQuery, kEmptyArraysNs, qr, R"("indexed_array_field":[],"non_indexed_array_field":[])",
-						"indexed_array_field", {}, description);
+		validateResults(rt.reindexer, kBaseQuery, query, kEmptyArraysNs, R"("indexed_array_field":[],"non_indexed_array_field":[])",
+						"indexed_array_field", {}, "remove empty array from empty indexed array");
 	}
 	{
-		const auto description = "remove all values from empty indexed array with append empty array";
 		const Query query = Query(kBaseQuery).Set("indexed_array_field", Variant("array_remove(indexed_array_field, [1, 99]) || []"), true);
-		QueryResults qr;
-		const auto err = rt.reindexer->Update(query, qr);
-		ASSERT_TRUE(err.ok()) << err.what();
-		validateResults(rt.reindexer, kBaseQuery, kEmptyArraysNs, qr, R"("indexed_array_field":[],"non_indexed_array_field":[])",
-						"indexed_array_field", {}, description);
+		validateResults(rt.reindexer, kBaseQuery, query, kEmptyArraysNs, R"("indexed_array_field":[],"non_indexed_array_field":[])",
+						"indexed_array_field", {}, "remove all values from empty indexed array with append empty array");
 	}
 	{
-		const auto description = "remove non-used values from empty indexed array with append";
 		const Query query =
 			Query(kBaseQuery).Set("indexed_array_field", Variant("array_remove(indexed_array_field, [1, 2, 3, 99]) || [99, 99, 99]"), true);
-		QueryResults qr;
-		const auto err = rt.reindexer->Update(query, qr);
-		ASSERT_TRUE(err.ok()) << err.what();
-		validateResults(rt.reindexer, kBaseQuery, kEmptyArraysNs, qr, R"("indexed_array_field":[99,99,99],"non_indexed_array_field":[])",
-						"indexed_array_field", {Variant(99), Variant(99), Variant(99)}, description);
+		validateResults(rt.reindexer, kBaseQuery, query, kEmptyArraysNs, R"("indexed_array_field":[99,99,99],"non_indexed_array_field":[])",
+						"indexed_array_field", {Variant(99), Variant(99), Variant(99)},
+						"remove non-used values from empty indexed array with append");
 	}
 	{
-		// negative: remove string value from indexed numeric array field
 		const Query query =
-			Query(kBaseQuery).Set("indexed_array_field", Variant(std::string(R"(array_remove(indexed_array_field, ['test']))")), true);
-		QueryResults qr;
-		const auto err = rt.reindexer->Update(query, qr);
-		ASSERT_FALSE(err.ok());
-		ASSERT_EQ(err.what(), "Can't convert 'test' to number");
-	}
-	{
-		const auto description = "remove all values from indexed array with duplicates";
-		const Query query = Query(kBaseQuery).Set("indexed_array_field", Variant("array_remove(indexed_array_field[0], [99, 1])"), true);
-		QueryResults qr;
-		const auto err = rt.reindexer->Update(query, qr);
-		ASSERT_TRUE(err.ok()) << err.what();
-		validateResults(rt.reindexer, kBaseQuery, kEmptyArraysNs, qr, R"("indexed_array_field":[],"non_indexed_array_field":[])",
-						"indexed_array_field", {}, description);
+			Query(kBaseQuery)
+				.Set("indexed_array_field", Variant(std::string(R"(array_remove(indexed_array_field, ['test', '99']))")), true);
+		validateResults(rt.reindexer, kBaseQuery, query, kEmptyArraysNs, R"("indexed_array_field":[],"non_indexed_array_field":[])",
+						"indexed_array_field", {}, "remove string values from numeric indexed array");
 	}
 	{
-		const auto description = "remove all values from empty indexed array with append";
 		const Query query =
 			Query(kBaseQuery).Set("indexed_array_field", Variant("array_remove(indexed_array_field, [1]) || [4, 3, 3]"), true);
-		QueryResults qr;
-		const auto err = rt.reindexer->Update(query, qr);
-		ASSERT_TRUE(err.ok()) << err.what();
R"("indexed_array_field":[4,3,3],"non_indexed_array_field":[])", - "indexed_array_field", {Variant(4), Variant(3), Variant(3)}, description); + validateResults(rt.reindexer, kBaseQuery, query, kEmptyArraysNs, R"("indexed_array_field":[4,3,3],"non_indexed_array_field":[])", + "indexed_array_field", {Variant(4), Variant(3), Variant(3)}, + "remove all values from empty indexed array with append"); } { - const auto description = R"("remove used\non-used values from indexed array with append empty array")"; const Query query = Query(kBaseQuery).Set("indexed_array_field", Variant("array_remove(indexed_array_field, [2, 5, 3]) || []"), true); - QueryResults qr; - const auto err = rt.reindexer->Update(query, qr); - ASSERT_TRUE(err.ok()) << err.what(); - validateResults(rt.reindexer, kBaseQuery, kEmptyArraysNs, qr, R"("indexed_array_field":[4],"non_indexed_array_field":[])", - "indexed_array_field", VariantArray{Variant(4)}, description); + validateResults(rt.reindexer, kBaseQuery, query, kEmptyArraysNs, R"("indexed_array_field":[4],"non_indexed_array_field":[])", + "indexed_array_field", VariantArray{Variant(4)}, + "remove used/non-used values from indexed array with append empty array"); } { - const auto description = R"("remove items from indexed array by single value scalar")"; const Query query = Query(kBaseQuery).Set("indexed_array_field", Variant("array_remove(indexed_array_field, 4)"), true); - QueryResults qr; - const auto err = rt.reindexer->Update(query, qr); - ASSERT_TRUE(err.ok()) << err.what(); - validateResults(rt.reindexer, kBaseQuery, kEmptyArraysNs, qr, R"("indexed_array_field":[],"non_indexed_array_field":[])", - "indexed_array_field", {}, description); + validateResults(rt.reindexer, kBaseQuery, query, kEmptyArraysNs, R"("indexed_array_field":[],"non_indexed_array_field":[])", + "indexed_array_field", {}, "remove items from indexed array by single value scalar"); } } @@ -1266,49 +1227,36 @@ TEST_F(NsApi, ArrayRemoveExtra) { const Query kBaseQuery = Query(kEmptyArraysNs).Where("id", CondSet, {100, 105, 189, 113, 153}); { - const auto description = "add array to empty non-indexed array"; const Query query = Query(kBaseQuery).Set("non_indexed_array_field", Variant("non_indexed_array_field || [99, 99, 99]"), true); - QueryResults qr; - const auto err = rt.reindexer->Update(query, qr); - ASSERT_TRUE(err.ok()) << err.what(); - validateResults(rt.reindexer, kBaseQuery, kEmptyArraysNs, qr, R"("indexed_array_field":[],"non_indexed_array_field":[99,99,99])", - "non_indexed_array_field", {Variant(int64_t(99)), Variant(int64_t(99)), Variant(int64_t(99))}, description); + validateResults(rt.reindexer, kBaseQuery, query, kEmptyArraysNs, R"("indexed_array_field":[],"non_indexed_array_field":[99,99,99])", + "non_indexed_array_field", {Variant(int64_t(99)), Variant(int64_t(99)), Variant(int64_t(99))}, + "add array to empty non-indexed array"); } { - const auto description = "remove from yourself indexed array field (empty) with append non-indexed field"; const Query query = Query(kBaseQuery) .Set("indexed_array_field", Variant("array_remove(indexed_array_field, indexed_array_field) || non_indexed_array_field"), true); - QueryResults qr; - const auto err = rt.reindexer->Update(query, qr); - ASSERT_TRUE(err.ok()) << err.what(); - validateResults(rt.reindexer, kBaseQuery, kEmptyArraysNs, qr, + validateResults(rt.reindexer, kBaseQuery, query, kEmptyArraysNs, R"("indexed_array_field":[99,99,99],"non_indexed_array_field":[99,99,99])", "indexed_array_field", - {Variant(99), Variant(99), Variant(99)}, 
-						{Variant(99), Variant(99), Variant(99)}, description);
+						{Variant(99), Variant(99), Variant(99)},
+						"remove from yourself indexed array field (empty) with append non-indexed field");
 	}
 	{
-		const auto description = "remove from yourself indexed array field with append";
 		const Query query =
 			Query(kBaseQuery).Set("indexed_array_field", Variant("array_remove(indexed_array_field, indexed_array_field) || [1,2]"), true);
-		QueryResults qr;
-		const auto err = rt.reindexer->Update(query, qr);
-		ASSERT_TRUE(err.ok()) << err.what();
-		validateResults(rt.reindexer, kBaseQuery, kEmptyArraysNs, qr, R"("indexed_array_field":[1,2],"non_indexed_array_field":[99,99,99])",
-						"indexed_array_field", {Variant(1), Variant(2)}, description);
+		validateResults(rt.reindexer, kBaseQuery, query, kEmptyArraysNs,
+						R"("indexed_array_field":[1,2],"non_indexed_array_field":[99,99,99])", "indexed_array_field",
+						{Variant(1), Variant(2)}, "remove from yourself indexed array field with append");
 	}
 	{
-		const auto description =
-			"mixed remove indexed array field with append remove in non-indexed field and append array (remove scalar)";
 		const Query query =
 			Query(kBaseQuery)
				.Set("indexed_array_field",
					 Variant("array_remove(indexed_array_field, 1) || array_remove_once(non_indexed_array_field, 99) || [3]"), true);
-		QueryResults qr;
-		const auto err = rt.reindexer->Update(query, qr);
-		ASSERT_TRUE(err.ok()) << err.what();
-		validateResults(rt.reindexer, kBaseQuery, kEmptyArraysNs, qr,
+		validateResults(rt.reindexer, kBaseQuery, query, kEmptyArraysNs,
 						R"("indexed_array_field":[2,99,99,3],"non_indexed_array_field":[99,99,99])", "indexed_array_field",
-						{Variant(2), Variant(99), Variant(99), Variant(3)}, description);
+						{Variant(2), Variant(99), Variant(99), Variant(3)},
+						"mixed remove indexed array field with append remove in non-indexed field and append array (remove scalar)");
 	}
 }

@@ -1326,80 +1274,48 @@ TEST_F(NsApi, ArrayRemoveOnce) {
 	ASSERT_EQ(err.what(), "Only an array field is expected as first parameter of command 'array_remove_once/array_remove'");
 	}
 	{
-		const auto description = "remove once empty array from empty indexed array";
 		const Query query = Query(kBaseQuery).Set("indexed_array_field", Variant("array_remove_once(indexed_array_field, [])"), true);
-		QueryResults qr;
-		const auto err = rt.reindexer->Update(query, qr);
-		ASSERT_TRUE(err.ok()) << err.what();
-		validateResults(rt.reindexer, kBaseQuery, kEmptyArraysNs, qr, R"("indexed_array_field":[],"non_indexed_array_field":[])",
-						"indexed_array_field", {}, description);
+		validateResults(rt.reindexer, kBaseQuery, query, kEmptyArraysNs, R"("indexed_array_field":[],"non_indexed_array_field":[])",
+						"indexed_array_field", {}, "remove once empty array from empty indexed array");
 	}
 	{
-		const auto description = "remove once values from empty indexed array";
 		const Query query = Query(kBaseQuery).Set("indexed_array_field", Variant("array_remove_once(indexed_array_field, [1, 99])"), true);
-		QueryResults qr;
-		const auto err = rt.reindexer->Update(query, qr);
-		ASSERT_TRUE(err.ok()) << err.what();
-		validateResults(rt.reindexer, kBaseQuery, kEmptyArraysNs, qr, R"("indexed_array_field":[],"non_indexed_array_field":[])",
-						"indexed_array_field", {}, description);
+		validateResults(rt.reindexer, kBaseQuery, query, kEmptyArraysNs, R"("indexed_array_field":[],"non_indexed_array_field":[])",
+						"indexed_array_field", {}, "remove once values from empty indexed array");
 	}
 	{
-		const auto description = "remove once non-used values from empty indexed array with append";
 		const Query query =
 			Query(kBaseQuery)
.Set("indexed_array_field", Variant("array_remove_once(indexed_array_field, [1, 2, 3, 99]) || [99, 99, 99]"), true); - QueryResults qr; - const auto err = rt.reindexer->Update(query, qr); - ASSERT_TRUE(err.ok()) << err.what(); - validateResults(rt.reindexer, kBaseQuery, kEmptyArraysNs, qr, R"("indexed_array_field":[99,99,99],"non_indexed_array_field":[])", - "indexed_array_field", {Variant(99), Variant(99), Variant(99)}, description); + validateResults(rt.reindexer, kBaseQuery, query, kEmptyArraysNs, R"("indexed_array_field":[99,99,99],"non_indexed_array_field":[])", + "indexed_array_field", {Variant(99), Variant(99), Variant(99)}, + "remove once non-used values from empty indexed array with append"); } { - // negative: remove once string value from indexed array - const Query query = - Query(kBaseQuery).Set("indexed_array_field", Variant(std::string(R"(array_remove_once(indexed_array_field, ['test']))")), true); - QueryResults qr; - const auto err = rt.reindexer->Update(query, qr); - ASSERT_FALSE(err.ok()); - ASSERT_EQ(err.what(), "Can't convert 'test' to number"); - } - { - // negative: remove once string value (scalar) from indexed array const Query query = Query(kBaseQuery).Set("indexed_array_field", Variant(std::string(R"(array_remove_once(indexed_array_field, 'Boo'))")), true); - QueryResults qr; - const auto err = rt.reindexer->Update(query, qr); - ASSERT_FALSE(err.ok()); - ASSERT_EQ(err.what(), "Can't convert 'Boo' to number"); + validateResults(rt.reindexer, kBaseQuery, query, kEmptyArraysNs, R"("indexed_array_field":[99,99,99],"non_indexed_array_field":[])", + "indexed_array_field", {Variant(99), Variant(99), Variant(99)}, + "remove once string non-used values from numeric indexed array"); } { - const auto description = "remove once empty array from non empty indexed array"; const Query query = Query(kBaseQuery).Set("indexed_array_field", Variant("array_remove_once(indexed_array_field, [])"), true); - QueryResults qr; - const auto err = rt.reindexer->Update(query, qr); - ASSERT_TRUE(err.ok()) << err.what(); - validateResults(rt.reindexer, kBaseQuery, kEmptyArraysNs, qr, R"("indexed_array_field":[99,99,99],"non_indexed_array_field":[])", - "indexed_array_field", {Variant(99), Variant(99), Variant(99)}, description); + validateResults(rt.reindexer, kBaseQuery, query, kEmptyArraysNs, R"("indexed_array_field":[99,99,99],"non_indexed_array_field":[])", + "indexed_array_field", {Variant(99), Variant(99), Variant(99)}, + "remove once empty array from non empty indexed array"); } { - const auto description = "remove once non-used values from indexed array"; const Query query = Query(kBaseQuery).Set("indexed_array_field", Variant("array_remove_once(indexed_array_field, [1, 2, 3])"), true); - QueryResults qr; - const auto err = rt.reindexer->Update(query, qr); - ASSERT_TRUE(err.ok()) << err.what(); - validateResults(rt.reindexer, kBaseQuery, kEmptyArraysNs, qr, R"("indexed_array_field":[99,99,99],"non_indexed_array_field":[])", - "indexed_array_field", {Variant(99), Variant(99), Variant(99)}, description); + validateResults(rt.reindexer, kBaseQuery, query, kEmptyArraysNs, R"("indexed_array_field":[99,99,99],"non_indexed_array_field":[])", + "indexed_array_field", {Variant(99), Variant(99), Variant(99)}, "remove once non-used values from indexed array"); } { - const auto description = "remove one value twice from indexed array with duplicates and with append empty array"; const Query query = Query(kBaseQuery).Set("indexed_array_field", Variant("array_remove_once(indexed_array_field, [99, 
99]) || []"), true); - QueryResults qr; - const auto err = rt.reindexer->Update(query, qr); - ASSERT_TRUE(err.ok()) << err.what(); - validateResults(rt.reindexer, kBaseQuery, kEmptyArraysNs, qr, R"("indexed_array_field":[99],"non_indexed_array_field":[])", - "indexed_array_field", VariantArray{Variant(99)}, description); + validateResults(rt.reindexer, kBaseQuery, query, kEmptyArraysNs, R"("indexed_array_field":[99],"non_indexed_array_field":[])", + "indexed_array_field", VariantArray{Variant(99)}, + "remove one value twice from indexed array with duplicates and with append empty array"); } } @@ -1409,34 +1325,25 @@ TEST_F(NsApi, ArrayRemoveNonIndexed) { const Query kBaseQuery = Query(kEmptyArraysNs).Where("id", CondSet, {100, 105, 189, 113, 153}); { - const auto description = "add array to empty non-indexed array"; const Query query = Query(kBaseQuery).Set("non_indexed_array_field", Variant("non_indexed_array_field || [99, 99, 99]"), true); - QueryResults qr; - const auto err = rt.reindexer->Update(query, qr); - ASSERT_TRUE(err.ok()) << err.what(); - validateResults(rt.reindexer, kBaseQuery, kEmptyArraysNs, qr, R"("indexed_array_field":[],"non_indexed_array_field":[99,99,99])", - "non_indexed_array_field", {Variant(int64_t(99)), Variant(int64_t(99)), Variant(int64_t(99))}, description); + validateResults(rt.reindexer, kBaseQuery, query, kEmptyArraysNs, R"("indexed_array_field":[],"non_indexed_array_field":[99,99,99])", + "non_indexed_array_field", {Variant(int64_t(99)), Variant(int64_t(99)), Variant(int64_t(99))}, + "add array to empty non-indexed array"); } { - const auto description = "remove scalar value from non-indexed array with append array"; const Query query = Query(kBaseQuery) .Set("non_indexed_array_field", Variant(std::string(R"(array_remove_once(non_indexed_array_field, '99') || [1, 2]))")), true); - QueryResults qr; - const auto err = rt.reindexer->Update(query, qr); - ASSERT_TRUE(err.ok()) << err.what(); - validateResults(rt.reindexer, kBaseQuery, kEmptyArraysNs, qr, R"("indexed_array_field":[],"non_indexed_array_field":[99,99,1,2])", - "non_indexed_array_field", {Variant(int64_t(99)), Variant(int64_t(99)), Variant(int64_t(1)), Variant(int64_t(2))}, - description); + validateResults(rt.reindexer, kBaseQuery, query, kEmptyArraysNs, + R"("indexed_array_field":[],"non_indexed_array_field":[99,99,1,2])", "non_indexed_array_field", + {Variant(int64_t(99)), Variant(int64_t(99)), Variant(int64_t(1)), Variant(int64_t(2))}, + "remove value from non-indexed array with append array"); } { - const auto description = "remove with duplicates from non indexed array"; const Query query = Query(kBaseQuery).Set("non_indexed_array_field", Variant("array_remove(non_indexed_array_field, [99])"), true); - QueryResults qr; - const auto err = rt.reindexer->Update(query, qr); - ASSERT_TRUE(err.ok()) << err.what(); - validateResults(rt.reindexer, kBaseQuery, kEmptyArraysNs, qr, R"("indexed_array_field":[],"non_indexed_array_field":[1,2])", - "non_indexed_array_field", {Variant(int64_t(1)), Variant(int64_t(2))}, description); + validateResults(rt.reindexer, kBaseQuery, query, kEmptyArraysNs, R"("indexed_array_field":[],"non_indexed_array_field":[1,2])", + "non_indexed_array_field", {Variant(int64_t(1)), Variant(int64_t(2))}, + "remove with duplicates from non indexed array"); } } @@ -1463,171 +1370,122 @@ TEST_F(NsApi, ArrayRemoveSparseStrings) { EXPECT_TRUE(err.ok()) << err.what(); Upsert(default_namespace, item); + Commit(default_namespace); - const Query kBaseQuery = 
Query(default_namespace).Where("id", CondEq, {1}); + constexpr int resCount = 1; + const Query kBaseQuery = Query(default_namespace).Where("id", CondEq, {resCount}); { const Query query = Query(kBaseQuery).Set("str_h_empty", Variant("array_remove_once(str_h_empty, [])"), true); - QueryResults qr; - err = rt.reindexer->Update(query, qr); - ASSERT_TRUE(err.ok()) << err.what(); - validateResults(rt.reindexer, kBaseQuery, default_namespace, qr, + validateResults(rt.reindexer, kBaseQuery, query, default_namespace, R"("str_h_field":["1","2","3","3"],"str_t_field":["11","22","33","33"],"str_h_empty":[])", "str_h_empty", - VariantArray().MarkArray(), "Step 1.1", 1); + VariantArray().MarkArray(), "Step 1.1", resCount); } { const Query query = Query(kBaseQuery).Set("str_h_empty", Variant("array_remove_once([], str_h_empty)"), true); - QueryResults qr; - err = rt.reindexer->Update(query, qr); - ASSERT_TRUE(err.ok()) << err.what(); - validateResults(rt.reindexer, kBaseQuery, default_namespace, qr, + validateResults(rt.reindexer, kBaseQuery, query, default_namespace, R"("str_h_field":["1","2","3","3"],"str_t_field":["11","22","33","33"],"str_h_empty":[])", "str_h_empty", - VariantArray().MarkArray(), "Step 1.2", 1); + VariantArray().MarkArray(), "Step 1.2", resCount); } { const Query query = Query(kBaseQuery).Set("str_h_empty", Variant("array_remove_once(str_h_empty, ['1'])"), true); - QueryResults qr; - err = rt.reindexer->Update(query, qr); - ASSERT_TRUE(err.ok()) << err.what(); - validateResults(rt.reindexer, kBaseQuery, default_namespace, qr, + validateResults(rt.reindexer, kBaseQuery, query, default_namespace, R"("str_h_field":["1","2","3","3"],"str_t_field":["11","22","33","33"],"str_h_empty":[])", "str_h_empty", - VariantArray().MarkArray(), "Step 1.3", 1); + VariantArray().MarkArray(), "Step 1.3", resCount); } { const Query query = Query(kBaseQuery).Set("str_h_empty", Variant("array_remove_once(str_h_empty, str_h_field)"), true); - QueryResults qr; - err = rt.reindexer->Update(query, qr); - ASSERT_TRUE(err.ok()) << err.what(); - validateResults(rt.reindexer, kBaseQuery, default_namespace, qr, + validateResults(rt.reindexer, kBaseQuery, query, default_namespace, R"("str_h_field":["1","2","3","3"],"str_t_field":["11","22","33","33"],"str_h_empty":[])", "str_h_empty", - VariantArray().MarkArray(), "Step 1.4", 1); + VariantArray().MarkArray(), "Step 1.4", resCount); } { const Query query = Query(kBaseQuery).Set("str_h_field", Variant("array_remove_once(str_h_field, str_h_empty)"), true); - QueryResults qr; - err = rt.reindexer->Update(query, qr); - ASSERT_TRUE(err.ok()) << err.what(); - validateResults(rt.reindexer, kBaseQuery, default_namespace, qr, + validateResults(rt.reindexer, kBaseQuery, query, default_namespace, R"("str_h_field":["1","2","3","3"],"str_t_field":["11","22","33","33"],"str_h_empty":[])", "str_h_field", - {Variant("1"), Variant("2"), Variant("3"), Variant("3")}, "Step 1.5", 1); + {Variant("1"), Variant("2"), Variant("3"), Variant("3")}, "Step 1.5", resCount); } { const Query query = Query(kBaseQuery).Set("str_h_empty", Variant("array_remove(str_h_field, ['1','3'])"), true); - QueryResults qr; - err = rt.reindexer->Update(query, qr); - ASSERT_TRUE(err.ok()) << err.what(); - validateResults(rt.reindexer, kBaseQuery, default_namespace, qr, + validateResults(rt.reindexer, kBaseQuery, query, default_namespace, R"("str_h_field":["1","2","3","3"],"str_t_field":["11","22","33","33"],"str_h_empty":["2"])", "str_h_empty", - VariantArray{Variant("2")}.MarkArray(), "Step 1.6", 1); + 
VariantArray{Variant("2")}.MarkArray(), "Step 1.6", resCount); } { const Query query = Query(kBaseQuery).Set("str_h_empty", Variant("array_remove(['1'], str_h_empty)"), true); - QueryResults qr; - err = rt.reindexer->Update(query, qr); - ASSERT_TRUE(err.ok()) << err.what(); - validateResults(rt.reindexer, kBaseQuery, default_namespace, qr, + validateResults(rt.reindexer, kBaseQuery, query, default_namespace, R"("str_h_field":["1","2","3","3"],"str_t_field":["11","22","33","33"],"str_h_empty":["1"])", "str_h_empty", - VariantArray{Variant("1")}.MarkArray(), "Step 1.7", 1); + VariantArray{Variant("1")}.MarkArray(), "Step 1.7", resCount); } { const Query query = Query(kBaseQuery).Set("str_h_field", Variant("array_remove(str_h_field, ['1','3','first']) || ['POCOMAXA']"), true); - QueryResults qr; - err = rt.reindexer->Update(query, qr); - ASSERT_TRUE(err.ok()) << err.what(); - validateResults(rt.reindexer, kBaseQuery, default_namespace, qr, + validateResults(rt.reindexer, kBaseQuery, query, default_namespace, R"("str_h_field":["2","POCOMAXA"],"str_t_field":["11","22","33","33"],"str_h_empty":["1"])", "str_h_field", - {Variant("2"), Variant("POCOMAXA")}, "Step 1.8", 1); + {Variant("2"), Variant("POCOMAXA")}, "Step 1.8", resCount); } { const Query query = Query(kBaseQuery).Set("str_t_empty", Variant("array_remove_once(str_t_empty, [])"), true); - QueryResults qr; - err = rt.reindexer->Update(query, qr); - ASSERT_TRUE(err.ok()) << err.what(); - validateResults(rt.reindexer, kBaseQuery, default_namespace, qr, + validateResults(rt.reindexer, kBaseQuery, query, default_namespace, R"("str_h_field":["2","POCOMAXA"],"str_t_field":["11","22","33","33"],"str_h_empty":["1"],"str_t_empty":[])", - "str_t_empty", VariantArray().MarkArray(), "Step 2.1", 1); + "str_t_empty", VariantArray().MarkArray(), "Step 2.1", resCount); } { const Query query = Query(kBaseQuery).Set("str_t_empty", Variant("array_remove_once(str_t_empty, ['1'])"), true); - QueryResults qr; - err = rt.reindexer->Update(query, qr); - ASSERT_TRUE(err.ok()) << err.what(); - validateResults(rt.reindexer, kBaseQuery, default_namespace, qr, + validateResults(rt.reindexer, kBaseQuery, query, default_namespace, R"("str_h_field":["2","POCOMAXA"],"str_t_field":["11","22","33","33"],"str_h_empty":["1"],"str_t_empty":[])", - "str_t_empty", VariantArray().MarkArray(), "Step 2.2", 1); + "str_t_empty", VariantArray().MarkArray(), "Step 2.2", resCount); } { const Query query = Query(kBaseQuery).Set("str_t_empty", Variant("array_remove_once(str_t_empty, str_t_field)"), true); - QueryResults qr; - err = rt.reindexer->Update(query, qr); - ASSERT_TRUE(err.ok()) << err.what(); - validateResults(rt.reindexer, kBaseQuery, default_namespace, qr, + validateResults(rt.reindexer, kBaseQuery, query, default_namespace, R"("str_h_field":["2","POCOMAXA"],"str_t_field":["11","22","33","33"],"str_h_empty":["1"],"str_t_empty":[])", - "str_t_empty", VariantArray().MarkArray(), "Step 2.3", 1); + "str_t_empty", VariantArray().MarkArray(), "Step 2.3", resCount); } { const Query query = Query(kBaseQuery).Set("str_t_empty", Variant("array_remove(str_t_empty, ['11','33','32'])"), true); - QueryResults qr; - err = rt.reindexer->Update(query, qr); - ASSERT_TRUE(err.ok()) << err.what(); - validateResults(rt.reindexer, kBaseQuery, default_namespace, qr, + validateResults(rt.reindexer, kBaseQuery, query, default_namespace, R"("str_h_field":["2","POCOMAXA"],"str_t_field":["11","22","33","33"],"str_h_empty":["1"],"str_t_empty":[])", - "str_t_empty", VariantArray().MarkArray(), "Step 2.4", 
-						"str_t_empty", VariantArray().MarkArray(), "Step 2.4", 1);
+						"str_t_empty", VariantArray().MarkArray(), "Step 2.4", resCount);
 	}
 	{
 		const Query query = Query(kBaseQuery).Set("str_t_empty", Variant("array_remove(['7'], str_t_empty)"), true);
-		QueryResults qr;
-		err = rt.reindexer->Update(query, qr);
-		ASSERT_TRUE(err.ok()) << err.what();
-		validateResults(rt.reindexer, kBaseQuery, default_namespace, qr,
+		validateResults(rt.reindexer, kBaseQuery, query, default_namespace,
 						R"("str_h_field":["2","POCOMAXA"],"str_t_field":["11","22","33","33"],"str_h_empty":["1"],"str_t_empty":["7"])",
-						"str_t_empty", VariantArray{Variant("7")}.MarkArray(), "Step 2.5", 1);
+						"str_t_empty", VariantArray{Variant("7")}.MarkArray(), "Step 2.5", resCount);
 	}
 	{
 		const Query query =
 			Query(kBaseQuery).Set("str_t_field", Variant("array_remove_once(str_t_field, ['11', '33', 'first']) || ['POCOMAXA']"), true);
-		QueryResults qr;
-		err = rt.reindexer->Update(query, qr);
-		ASSERT_TRUE(err.ok()) << err.what();
-		validateResults(rt.reindexer, kBaseQuery, default_namespace, qr,
+		validateResults(rt.reindexer, kBaseQuery, query, default_namespace,
 						R"("str_h_field":["2","POCOMAXA"],"str_t_field":["22","33","POCOMAXA"],"str_h_empty":["1"],"str_t_empty":["7"])",
-						"str_t_field", {Variant("22"), Variant("33"), Variant("POCOMAXA")}, "Step 2.6", 1);
+						"str_t_field", {Variant("22"), Variant("33"), Variant("POCOMAXA")}, "Step 2.6", resCount);
 	}
 	{
 		const Query query =
 			Query(kBaseQuery).Set("str_h_empty", Variant("array_remove_once(str_h_empty, str_t_empty) || ['007','XXX']"), true);
-		QueryResults qr;
-		err = rt.reindexer->Update(query, qr);
-		ASSERT_TRUE(err.ok()) << err.what();
 		validateResults(
-			rt.reindexer, kBaseQuery, default_namespace, qr,
+			rt.reindexer, kBaseQuery, query, default_namespace,
 			R"("str_h_field":["2","POCOMAXA"],"str_t_field":["22","33","POCOMAXA"],"str_h_empty":["1","007","XXX"],"str_t_empty":["7"])",
-			"str_h_empty", {Variant("1"), Variant("007"), Variant("XXX")}, "Step 3.1", 1);
+			"str_h_empty", {Variant("1"), Variant("007"), Variant("XXX")}, "Step 3.1", resCount);
 	}
 	{
 		const Query query =
 			Query(kBaseQuery).Set("str_t_field", Variant("[ '7', 'XXX' ] || array_remove_once( str_t_field , str_h_field ) "), true);
-		QueryResults qr;
-		err = rt.reindexer->Update(query, qr);
-		ASSERT_TRUE(err.ok()) << err.what();
 		validateResults(
-			rt.reindexer, kBaseQuery, default_namespace, qr,
+			rt.reindexer, kBaseQuery, query, default_namespace,
 			R"("str_h_field":["2","POCOMAXA"],"str_t_field":["7","XXX","22","33"],"str_h_empty":["1","007","XXX"],"str_t_empty":["7"])",
-			"str_t_field", {Variant("7"), Variant("XXX"), Variant("22"), Variant("33")}, "Step 3.2", 1);
+			"str_t_field", {Variant("7"), Variant("XXX"), Variant("22"), Variant("33")}, "Step 3.2", resCount);
 	}
 	{
 		const Query query = Query(kBaseQuery).Set("str_t_field", Variant("array_remove_once( str_t_field , '22' \t) "), true);
-		QueryResults qr;
-		err = rt.reindexer->Update(query, qr);
-		ASSERT_TRUE(err.ok()) << err.what();
 		validateResults(
-			rt.reindexer, kBaseQuery, default_namespace, qr,
+			rt.reindexer, kBaseQuery, query, default_namespace,
 			R"("str_h_field":["2","POCOMAXA"],"str_t_field":["7","XXX","33"],"str_h_empty":["1","007","XXX"],"str_t_empty":["7"])",
-			"str_t_field", {Variant("7"), Variant("XXX"), Variant("33")}, "Step 3.3", 1);
+			"str_t_field", {Variant("7"), Variant("XXX"), Variant("33")}, "Step 3.3", resCount);
 	}
 }

@@ -1645,42 +1503,33 @@ TEST_F(NsApi, ArrayRemoveSparseDoubles) {
 	EXPECT_TRUE(err.ok()) << err.what();

 	Upsert(default_namespace, item);
+	Commit(default_namespace);

Query(default_namespace).Where("id", CondEq, {1}); + constexpr int resCount = 1; + const Query kBaseQuery = Query(default_namespace).Where("id", CondEq, {resCount}); { const Query query = Query(kBaseQuery).Set("double_empty", Variant("array_remove(double_empty, [])"), true); - QueryResults qr; - err = rt.reindexer->Update(query, qr); - ASSERT_TRUE(err.ok()) << err.what(); - validateResults(rt.reindexer, kBaseQuery, default_namespace, qr, R"("double_field":[1.11,2.22,3.33,3.33],"double_empty":[])", - "double_empty", VariantArray{}.MarkArray(), "Step 1.1", 1); + validateResults(rt.reindexer, kBaseQuery, query, default_namespace, R"("double_field":[1.11,2.22,3.33,3.33],"double_empty":[])", + "double_empty", VariantArray{}.MarkArray(), "ArrayRemoveSparseDoubles Step 1.1", resCount); } { const Query query = Query(kBaseQuery).Set("double_empty", Variant("array_remove_once(double_empty, double_field) || [0.07]"), true); - QueryResults qr; - err = rt.reindexer->Update(query, qr); - ASSERT_TRUE(err.ok()) << err.what(); - validateResults(rt.reindexer, kBaseQuery, default_namespace, qr, R"("double_field":[1.11,2.22,3.33,3.33],"double_empty":[0.07])", - "double_empty", VariantArray{Variant(0.07)}.MarkArray(), "Step 1.2", 1); + validateResults(rt.reindexer, kBaseQuery, query, default_namespace, R"("double_field":[1.11,2.22,3.33,3.33],"double_empty":[0.07])", + "double_empty", VariantArray{Variant(0.07)}.MarkArray(), "ArrayRemoveSparseDoubles Step 1.2", resCount); } { const Query query = Query(kBaseQuery).Set("double_field", Variant("[7.77] || array_remove(double_field, double_empty)"), true); - QueryResults qr; - err = rt.reindexer->Update(query, qr); - ASSERT_TRUE(err.ok()) << err.what(); - validateResults(rt.reindexer, kBaseQuery, default_namespace, qr, + validateResults(rt.reindexer, kBaseQuery, query, default_namespace, R"("double_field":[7.77,1.11,2.22,3.33,3.33],"double_empty":[0.07])", "double_field", - {Variant(7.77), Variant(1.11), Variant(2.22), Variant(3.33), Variant(3.33)}, "Step 1.3", 1); + {Variant(7.77), Variant(1.11), Variant(2.22), Variant(3.33), Variant(3.33)}, "ArrayRemoveSparseDoubles Step 1.3", + resCount); } { const Query query = Query(kBaseQuery).Set("double_field", Variant("array_remove_once(double_field, [3.33,3.33,1.11,99])"), true); - QueryResults qr; - err = rt.reindexer->Update(query, qr); - ASSERT_TRUE(err.ok()) << err.what(); - validateResults(rt.reindexer, kBaseQuery, default_namespace, qr, R"("double_field":[7.77,2.22],"double_empty":[0.07])", - "double_field", {Variant(7.77), Variant(2.22)}, "Step 1.4", 1); + validateResults(rt.reindexer, kBaseQuery, query, default_namespace, R"("double_field":[7.77,2.22],"double_empty":[0.07])", + "double_field", {Variant(7.77), Variant(2.22)}, "ArrayRemoveSparseDoubles Step 1.4", resCount); } } @@ -1698,43 +1547,33 @@ TEST_F(NsApi, ArrayRemoveSparseBooleans) { EXPECT_TRUE(err.ok()) << err.what(); Upsert(default_namespace, item); + Commit(default_namespace); - const Query kBaseQuery = Query(default_namespace).Where("id", CondEq, {1}); + constexpr int resCount = 1; + const Query kBaseQuery = Query(default_namespace).Where("id", CondEq, {resCount}); { const Query query = Query(kBaseQuery).Set("bool_empty", Variant("array_remove(bool_empty, [])"), true); - QueryResults qr; - err = rt.reindexer->Update(query, qr); - ASSERT_TRUE(err.ok()) << err.what(); - validateResults(rt.reindexer, kBaseQuery, default_namespace, qr, R"("bool_field":[true,true,false,false],"bool_empty":[])", - "bool_empty", VariantArray().MarkArray(), "Step 1.1", 1); + 
validateResults(rt.reindexer, kBaseQuery, query, default_namespace, R"("bool_field":[true,true,false,false],"bool_empty":[])", + "bool_empty", VariantArray().MarkArray(), "ArrayRemoveSparseBooleans Step 1.1", resCount); } { const Query query = Query(kBaseQuery).Set("bool_empty", Variant("array_remove_once(bool_empty, bool_field) || [1]"), true); - QueryResults qr; - err = rt.reindexer->Update(query, qr); - ASSERT_TRUE(err.ok()) << err.what(); - validateResults(rt.reindexer, kBaseQuery, default_namespace, qr, R"("bool_field":[true,true,false,false],"bool_empty":[true])", - "bool_empty", VariantArray{Variant(true)}.MarkArray(), "Step 1.2", 1); + validateResults(rt.reindexer, kBaseQuery, query, default_namespace, R"("bool_field":[true,true,false,false],"bool_empty":[true])", + "bool_empty", VariantArray{Variant(true)}.MarkArray(), "ArrayRemoveSparseBooleans Step 1.2", resCount); } { const Query query = Query(kBaseQuery).Set("bool_field", Variant("array_remove_once(bool_field, bool_empty)"), true); - QueryResults qr; - err = rt.reindexer->Update(query, qr); - ASSERT_TRUE(err.ok()) << err.what(); - validateResults(rt.reindexer, kBaseQuery, default_namespace, qr, R"("bool_field":[true,false,false],"bool_empty":[true])", - "bool_field", {Variant(true), Variant(false), Variant(false)}, "Step 1.3", 1); + validateResults(rt.reindexer, kBaseQuery, query, default_namespace, R"("bool_field":[true,false,false],"bool_empty":[true])", + "bool_field", {Variant(true), Variant(false), Variant(false)}, "ArrayRemoveSparseBooleans Step 1.3", resCount); } { const Query query = Query(kBaseQuery) .Set("bool_field", Variant("[true] || array_remove(bool_field, [false]) || array_remove_once(bool_empty, [0])"), true); - QueryResults qr; - err = rt.reindexer->Update(query, qr); - ASSERT_TRUE(err.ok()) << err.what(); - validateResults(rt.reindexer, kBaseQuery, default_namespace, qr, R"("bool_field":[true,true,true],"bool_empty":[true])", - "bool_field", {Variant(true), Variant(true), Variant(true)}, "Step 1.4", 1); + validateResults(rt.reindexer, kBaseQuery, query, default_namespace, R"("bool_field":[true,true,true],"bool_empty":[true])", + "bool_field", {Variant(true), Variant(true), Variant(true)}, "ArrayRemoveSparseBooleans Step 1.4", resCount); } } @@ -1765,6 +1604,7 @@ TEST_F(NsApi, ArrayRemoveSeveralJsonPathsField) { item[intField1] = sz + i; Upsert(testNS, item); + Commit(testNS); } @@ -1787,9 +1627,8 @@ TEST_F(NsApi, ArrayRemoveWithSql) { // 3. Remove from array_field with expression substantially { Query updateQuery = Query::FromSQL( - "update test_namespace set array_field = [0] || array_remove(array_field, [3,2,1]) || array_remove_once(indexed_array_field, " - "[99]) " - "|| [7,9]"); + "update test_namespace set array_field = [0] || array_remove(array_field, [3,2,1])" + " || array_remove_once(indexed_array_field, [99]) || [7,9]"); QueryResults qrUpdate; Error err = rt.reindexer->Update(updateQuery, qrUpdate); ASSERT_TRUE(err.ok()) << err.what(); @@ -1832,46 +1671,173 @@ TEST_F(NsApi, ArrayRemoveWithSql) { } } -TEST_F(NsApi, UpdateObjectsArray) { - // 1. Define NS - // 2. Fill NS - DefineDefaultNamespace(); - AddUnindexedData(); +static void validateUpdateJSONResults(const std::shared_ptr &reindexer, const Query &updateQuery, + std::string_view expectation, std::string_view description) { + SCOPED_TRACE(description); - // 3. 
Update object array and change one of it's items - Query updateQuery = - Query::FromSQL(R"(update test_namespace set nested.nested_array[1] = {"id":1,"name":"modified", "prices":[4,5,6]})"); - QueryResults qrUpdate; - Error err = rt.reindexer->Update(updateQuery, qrUpdate); + QueryResults qr; + auto err = reindexer->Update(updateQuery, qr); ASSERT_TRUE(err.ok()) << err.what(); - // 4. Make sure nested.nested_array[1] is set to a new value properly - for (auto it : qrUpdate) { + std::vector initialResults; + initialResults.reserve(qr.Count()); + for (auto it : qr) { Item item = it.GetItem(false); checkIfItemJSONValid(it); - ASSERT_TRUE(item.GetJSON().find(R"({"id":1,"name":"modified","prices":[4,5,6]})") != std::string::npos); + const auto json = item.GetJSON(); + ASSERT_NE(json.find(expectation), std::string::npos) << "JSON: " << json << ";\nexpectation: " << expectation; + initialResults.emplace_back(json); + } + + // Check select results + QueryResults qrSelect; + err = reindexer->Select("SELECT * FROM test_namespace", qrSelect); + ASSERT_TRUE(err.ok()) << err.what(); + ASSERT_EQ(qrSelect.Count(), qr.Count()); + unsigned i = 0; + for (auto it : qrSelect) { + Item item = it.GetItem(false); + checkIfItemJSONValid(it); + const auto json = item.GetJSON(); + ASSERT_EQ(json, initialResults[i++]); } } +TEST_F(NsApi, UpdateObjectsArray) { + DefineDefaultNamespace(); + AddUnindexedData(); + + Query updateQuery = + Query::FromSQL(R"(update test_namespace set nested.nested_array[1] = {"id":1,"name":"modified", "prices":[4,5,6]})"); + validateUpdateJSONResults(rt.reindexer, updateQuery, R"({"id":1,"name":"modified","prices":[4,5,6]})", + "Make sure nested.nested_array[1] is set to a new value properly"); +} + TEST_F(NsApi, UpdateObjectsArray2) { - // 1. Define NS - // 2. Fill NS DefineDefaultNamespace(); AddUnindexedData(); - // 3. Set all items of the object array to a new value Query updateQuery = Query::FromSQL(R"(update test_namespace set nested.nested_array[*] = {"ein":1,"zwei":2, "drei":3})"); - QueryResults qrUpdate; - Error err = rt.reindexer->Update(updateQuery, qrUpdate); - ASSERT_TRUE(err.ok()) << err.what(); + validateUpdateJSONResults(rt.reindexer, updateQuery, + R"("nested_array":[{"ein":1,"zwei":2,"drei":3},{"ein":1,"zwei":2,"drei":3},{"ein":1,"zwei":2,"drei":3}]})", + "Make sure all items of nested.nested_array are set to a new value correctly"); +} - // 4. 
Make sure all items of nested.nested_array are set to a new value correctly - for (auto it : qrUpdate) { - Item item = it.GetItem(false); - checkIfItemJSONValid(it); - ASSERT_TRUE(item.GetJSON().find( - R"("nested_array":[{"ein":1,"zwei":2,"drei":3},{"ein":1,"zwei":2,"drei":3},{"ein":1,"zwei":2,"drei":3}]})") != - std::string::npos); +TEST_F(NsApi, UpdateHeterogeneousArray) { + const std::string kEmptyArraysNs = "empty_namespace"; + constexpr int resCount = 100; + CreateEmptyArraysNamespace(kEmptyArraysNs); + const Query kBaseQuery; // dummy + + /*{ // ToDo: issues #1469 #1721 + Query query = Query::FromSQL(R"(UPDATE empty_namespace SET non_indexed_array_field = [1, null])"); + validateResults(rt.reindexer, kBaseQuery, query, kEmptyArraysNs, + R"("indexed_array_field":[],"non_indexed_array_field":[1,null])", "non_indexed_array_field", + {Variant(int64_t(1)), Variant()}, "Checking set heterogeneous non-indexed array with null", resCount); + }*/ + { + Query query = Query::FromSQL(R"(UPDATE empty_namespace SET non_indexed_array_field = [1,-2,3])"); + validateResults(rt.reindexer, kBaseQuery, query, kEmptyArraysNs, R"("indexed_array_field":[],"non_indexed_array_field":[1,-2,3])", + "non_indexed_array_field", {Variant(int64_t(1)), Variant(int64_t(-2)), Variant(int64_t(3))}, + "Set homogeneous non-indexed array", resCount); + + Query query2 = Query::FromSQL(R"(UPDATE empty_namespace SET non_indexed_array_field[1] = -505.6782)"); + validateResults(rt.reindexer, kBaseQuery, query2, kEmptyArraysNs, + R"("indexed_array_field":[],"non_indexed_array_field":[1,-505.6782,3])", "non_indexed_array_field", + {Variant(int64_t(1)), Variant(-505.6782), Variant(int64_t(3))}, + "Check the possibility of making a homogeneous non-indexed array heterogeneous", resCount); + } + { + Query query = Query::FromSQL(R"(UPDATE empty_namespace SET non_indexed_array_field = ['hi',true,'bro'])"); + validateResults(rt.reindexer, kBaseQuery, query, kEmptyArraysNs, + R"("indexed_array_field":[],"non_indexed_array_field":["hi",true,"bro"])", "non_indexed_array_field", + {Variant("hi"), Variant(true), Variant("bro")}, "Checking set heterogeneous non-indexed array", resCount); + } + { + Query query = Query::FromSQL("UPDATE empty_namespace SET non_indexed_array_field[1] = 3"); + validateResults(rt.reindexer, kBaseQuery, query, kEmptyArraysNs, + R"("indexed_array_field":[],"non_indexed_array_field":["hi",3,"bro"])", "non_indexed_array_field", + {Variant("hi"), Variant(int64_t(3)), Variant("bro")}, + "Checking overwrite in heterogeneous array one item via scalar value (middle)", resCount); + } + { + Query query = Query::FromSQL("UPDATE empty_namespace SET non_indexed_array_field[2] = 24"); + validateResults(rt.reindexer, kBaseQuery, query, kEmptyArraysNs, + R"("indexed_array_field":[],"non_indexed_array_field":["hi",3,24])", "non_indexed_array_field", + {Variant("hi"), Variant(int64_t(3)), Variant(int64_t(24))}, + "Checking overwrite in heterogeneous array one item via scalar value (last)", resCount); + } + { + Query query = Query::FromSQL("UPDATE empty_namespace SET non_indexed_array_field[0] = 81"); + validateResults(rt.reindexer, kBaseQuery, query, kEmptyArraysNs, R"("indexed_array_field":[],"non_indexed_array_field":[81,3,24])", + "non_indexed_array_field", {Variant(int64_t(81)), Variant(int64_t(3)), Variant(int64_t(24))}, + "Checking overwrite in heterogeneous array one item via scalar value (first)", resCount); + } + { + Query query = Query::FromSQL("UPDATE empty_namespace SET non_indexed_array_field = 183042"); +
validateResults(rt.reindexer, kBaseQuery, query, kEmptyArraysNs, R"("indexed_array_field":[],"non_indexed_array_field":183042)", + "non_indexed_array_field", {Variant(int64_t(183042))}, + "Checking overwrite heterogeneous non-indexed array by single scalar value", resCount); + } + { + Query query = Query::FromSQL(R"(UPDATE empty_namespace SET non_indexed_array_field = ['pocomaxa','forever',true])"); + validateResults(rt.reindexer, kBaseQuery, query, kEmptyArraysNs, + R"("indexed_array_field":[],"non_indexed_array_field":["pocomaxa","forever",true])", "non_indexed_array_field", + {Variant("pocomaxa"), Variant("forever"), Variant(true)}, + "Checking overwrite non-indexed scalar with heterogeneous array", resCount); + } + { + Query query = Query::FromSQL(R"(UPDATE empty_namespace SET non_indexed_array_field = [3.14,9811,'Boom'])"); + validateResults(rt.reindexer, kBaseQuery, query, kEmptyArraysNs, + R"("indexed_array_field":[],"non_indexed_array_field":[3.14,9811,"Boom"])", "non_indexed_array_field", + {Variant(3.14), Variant(int64_t(9811)), Variant("Boom")}, + "Checking overwrite non-indexed array with heterogeneous array", resCount); + } + { + Query query = Query::FromSQL("UPDATE empty_namespace SET non_indexed_array_field = 3.14"); + validateResults(rt.reindexer, kBaseQuery, query, kEmptyArraysNs, R"("indexed_array_field":[],"non_indexed_array_field":3.14)", + "non_indexed_array_field", {Variant(3.14)}, + "Checking overwrite heterogeneous non-indexed array with scalar value (double)", resCount); + } + + { + Query query = Query::FromSQL(R"(UPDATE empty_namespace SET indexed_array_field = ['2',3])"); + validateResults(rt.reindexer, kBaseQuery, query, kEmptyArraysNs, R"("indexed_array_field":[2,3],"non_indexed_array_field":3.14)", + "indexed_array_field", {Variant(2), Variant(3)}, "Checking set heterogeneous indexed array with conversion", + resCount); + } + { + Query query = Query::FromSQL("UPDATE empty_namespace SET indexed_array_field = 4"); + validateResults(rt.reindexer, kBaseQuery, query, kEmptyArraysNs, R"("indexed_array_field":4,"non_indexed_array_field":3.14)", + "indexed_array_field", {Variant(4)}, "Checking set heterogeneous indexed array with scalar value (int)", resCount); + } + { + Query query = Query::FromSQL(R"(UPDATE empty_namespace SET indexed_array_field = ['111',222,333])"); + validateResults(rt.reindexer, kBaseQuery, query, kEmptyArraysNs, + R"("indexed_array_field":[111,222,333],"non_indexed_array_field":3.14)", "indexed_array_field", + {Variant(111), Variant(222), Variant(333)}, "Checking overwrite scalar value field with heterogeneous array", + resCount); + } + { + const auto description = "Checking update of heterogeneous indexed array with invalid element - expected failure"; + Query query = Query::FromSQL(R"(UPDATE empty_namespace SET indexed_array_field = [555,'BOO'])"); + QueryResults qr; + auto err = rt.reindexer->Update(query, qr); + ASSERT_FALSE(err.ok()) << description; + ASSERT_EQ(err.what(), "Can't convert 'BOO' to number") << description; + } + { + Query query = Query::FromSQL(R"(UPDATE empty_namespace SET indexed_array_field[0] = '777')"); + validateResults(rt.reindexer, kBaseQuery, query, kEmptyArraysNs, + R"("indexed_array_field":[777,222,333],"non_indexed_array_field":3.14)", "indexed_array_field", + {Variant(777), Variant(222), Variant(333)}, + "Checking overwrite in heterogeneous indexed array one item via scalar value", resCount); + } + { + Query query = Query::FromSQL(R"(UPDATE empty_namespace SET indexed_array_field = ['333', 33])"); + 
validateResults(rt.reindexer, kBaseQuery, query, kEmptyArraysNs, R"("indexed_array_field":[333,33],"non_indexed_array_field":3.14)", + "indexed_array_field", {Variant(333), Variant(33)}, + "Checking overwrite indexed array field with heterogeneous array", resCount); } } @@ -1939,7 +1905,7 @@ TEST_F(NsApi, UpdateObjectsArray4) { Error err = rt.reindexer->TruncateNamespace(default_namespace); ASSERT_TRUE(err.ok()) << err.what(); // 2. Refill NS - AddHeterogeniousNestedData(); + AddHeterogeneousNestedData(); err = rt.reindexer->DropIndex(default_namespace, reindexer::IndexDef(kIndexName, {kIndexName}, "hash", "int64", IndexOpts().Array())); (void)err; // Error does not matter here @@ -1948,8 +1914,8 @@ TEST_F(NsApi, UpdateObjectsArray4) { IndexOpts().Array().Sparse(index == "sparse"))); ASSERT_TRUE(err.ok()) << err.what(); } - const std::string indexTypeMsg = fmt::sprintf("Index type is '%s'", index); + const std::string indexTypeMsg = fmt::sprintf("Index type is '%s' ", index); { const auto description = "Update array field, nested into objects array with explicit index (1 element)"; Query updateQuery = Query(kBaseQuery).Set("objects[0].array[0].field[4]", {777}, false); @@ -3323,3 +3289,27 @@ TEST_F(NsApi, IncorrectNsName) { }; check(variants, rename); } + +TEST_F(NsApi, TwistNullUpdate) { + Error err = rt.reindexer->OpenNamespace(default_namespace); + ASSERT_TRUE(err.ok()) << err.what(); + + DefineNamespaceDataset(default_namespace, {IndexDeclaration{idIdxName.c_str(), "hash", "int", IndexOpts().PK(), 0}, + IndexDeclaration{"array_idx", "hash", "int", IndexOpts().Array().Sparse(true), 0}}); + const std::string json = R"json({"id": 3, "array_idx": [1,1]})json"; + + Item item = NewItem(default_namespace); + EXPECT_TRUE(item.Status().ok()) << item.Status().what(); + err = item.FromJSON(json); + EXPECT_TRUE(err.ok()) << err.what(); + Upsert(default_namespace, item); + Commit(default_namespace); + + Query query = Query::FromSQL("UPDATE test_namespace SET array_idx = [null, null, null] WHERE id=3"); + QueryResults qr; + err = rt.reindexer->Update(query, qr); + ASSERT_TRUE(err.ok()) << err.what(); + // second update - force read/parsing + err = rt.reindexer->Update(query, qr); + ASSERT_TRUE(err.ok()) << err.what(); +} diff --git a/cpp_src/gtests/tests/unit/replication_test.cc b/cpp_src/gtests/tests/unit/replication_test.cc index be7f76b5e..1913c409a 100644 --- a/cpp_src/gtests/tests/unit/replication_test.cc +++ b/cpp_src/gtests/tests/unit/replication_test.cc @@ -3,6 +3,8 @@ #include "replication_load_api.h" #include "replicator/walrecord.h" +using reindexer::Query; + // clang-format off constexpr std::string_view kReplTestSchema1 = R"xxx( { @@ -208,11 +210,11 @@ TEST_F(ReplicationLoadApi, WALResizeStaticData) { auto qrToSet = [](const BaseApi::QueryResultsType& qr) { std::unordered_set items; - WrSerializer ser; + reindexer::WrSerializer ser; for (auto& item : qr) { if (item.IsRaw()) { reindexer::WALRecord rec(item.GetRaw()); - EXPECT_EQ(rec.type, WalReplState); + EXPECT_EQ(rec.type, reindexer::WalReplState); } else { ser.Reset(); auto err = item.GetCJSON(ser, false); @@ -394,7 +396,7 @@ TEST_F(ReplicationLoadApi, DuplicatePKSlaveTest) { ASSERT_TRUE(err.ok()); ASSERT_EQ(qr.Count(), items.size()); for (auto i : qr) { - WrSerializer ser; + reindexer::WrSerializer ser; err = i.GetJSON(ser, false); gason::JsonParser parser; auto root = parser.Parse(ser.Slice()); diff --git a/cpp_src/net/http/router.cc b/cpp_src/net/http/router.cc index 034451499..df31b3d6b 100644 --- a/cpp_src/net/http/router.cc +++
b/cpp_src/net/http/router.cc @@ -143,7 +143,7 @@ int Context::File(int code, std::string_view path, std::string_view data, bool i std::string content; if (data.length() == 0) { - if (fs::ReadFile(isGzip ? std::string(path) + kGzSuffix : std::string(path), content) < 0) { + if (fs::ReadFile(isGzip ? std::string(path).append(kGzSuffix) : std::string(path), content) < 0) { return String(http::StatusNotFound, "File not found"); } } else { @@ -168,7 +168,7 @@ std::vector methodNames = {"GET"sv, "POST"sv, "OPTIONS"sv, "HE HttpMethod lookupMethod(std::string_view method) { for (auto &cm : methodNames) if (method == cm) return HttpMethod(&cm - &methodNames[0]); - return HttpMethod(-1); + return HttpMethod(kMethodUnknown); } int Router::handle(Context &ctx) { diff --git a/cpp_src/net/http/router.h b/cpp_src/net/http/router.h index 8f1fe2b97..b25d12d56 100644 --- a/cpp_src/net/http/router.h +++ b/cpp_src/net/http/router.h @@ -50,7 +50,8 @@ enum HttpStatusCode { }; enum HttpMethod : int { - kMethodGET, + kMethodUnknown = -1, + kMethodGET = 0, kMethodPOST, kMethodOPTIONS, kMethodHEAD, @@ -146,7 +147,7 @@ struct ClientData { virtual ~ClientData() = default; }; -static const std::string kGzSuffix(".gz"); +static constexpr std::string_view kGzSuffix(".gz"); struct Context { int JSON(int code, std::string_view slice); diff --git a/cpp_src/readme.md b/cpp_src/readme.md index 09047c647..59c2568a9 100644 --- a/cpp_src/readme.md +++ b/cpp_src/readme.md @@ -31,8 +31,9 @@ While using docker, you may pass reindexer server config options via envinronmen - `RX_PPROF` - if RX_PPROF is not empty, enables pprof api. Disabled by default. - `RX_SECURITY` - if RX_SECURITY is not empty, enables authorization. Disabled by default. - `RX_PROMETHEUS` - if RX_PROMETHEUS is not empty, enables prometheus metrics. Disabled by default. -- `RX_RPC_QR_IDLE_TIMEOUT` - RPC query results idle timeout (in seconds). Default value is 0 (timeout disabled). -- `RX_DISABLE_NS_LEAK` - Disables namespaces memory leak on database destruction (will slow down server's termination) +- `RX_RPC_QR_IDLE_TIMEOUT` - RPC query results idle timeout (in seconds). Default value is `0` (timeout disabled). +- `RX_DISABLE_NS_LEAK` - Disables namespaces memory leak on database destruction (will slow down server's termination). +- `RX_MAX_HTTP_REQ` - allows to configure max HTTP request size (in bytes). Default value is `2097152` (= 2 MB). `0` means 'unlimited'. 
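For illustration, here is a minimal sketch of how the documented `RX_MAX_HTTP_REQ` semantics could be resolved at startup: unset or empty falls back to the 2 MB default, and `0` is passed through as 'unlimited'. The helper name `resolveMaxHttpReqSize` is hypothetical and is not part of the server's actual code.

```cpp
#include <cstdint>
#include <cstdlib>

// Hypothetical helper following the readme's documented semantics for RX_MAX_HTTP_REQ.
// Returns the effective HTTP request size cap in bytes; 0 means 'unlimited'.
int64_t resolveMaxHttpReqSize() {
	constexpr int64_t kDefaultMaxHttpReq = 2097152;  // 2 MB, the documented default
	const char* env = std::getenv("RX_MAX_HTTP_REQ");
	if (env == nullptr || *env == '\0') {
		return kDefaultMaxHttpReq;  // variable not set - keep the default
	}
	char* end = nullptr;
	const int64_t parsed = std::strtoll(env, &end, 10);
	if (end == env || parsed < 0) {
		return kDefaultMaxHttpReq;  // unparsable or negative - fall back to the default
	}
	return parsed;  // 0 is returned as-is and treated by the caller as 'no limit'
}
```
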
## Linux diff --git a/cpp_src/reindexer-config-version.cmake.in b/cpp_src/reindexer-config-version.cmake.in index c13a2c750..cd5d8f3b9 100644 --- a/cpp_src/reindexer-config-version.cmake.in +++ b/cpp_src/reindexer-config-version.cmake.in @@ -4,11 +4,11 @@ if (PACKAGE_VERSION VERSION_EQUAL PACKAGE_FIND_VERSION) set(PACKAGE_VERSION_EXACT TRUE) set(PACKAGE_VERSION_COMPATIBLE TRUE) set(PACKAGE_VERSION_UNSUITABLE FALSE) -elseif (PACKAGE_VERSION VERSION_GREATER PACKAGE_FIND_VERSION) +elseif(PACKAGE_VERSION VERSION_GREATER PACKAGE_FIND_VERSION) set(PACKAGE_VERSION_EXACT FALSE) set(PACKAGE_VERSION_COMPATIBLE TRUE) set(PACKAGE_VERSION_UNSUITABLE FALSE) -else () +else() set(PACKAGE_VERSION_EXACT FALSE) set(PACKAGE_VERSION_COMPATIBLE FALSE) set(PACKAGE_VERSION_UNSUITABLE TRUE) diff --git a/cpp_src/server/CMakeLists.txt b/cpp_src/server/CMakeLists.txt index 035b2cfcd..caa18f1a6 100644 --- a/cpp_src/server/CMakeLists.txt +++ b/cpp_src/server/CMakeLists.txt @@ -72,7 +72,7 @@ file(WRITE ${PROJECT_BINARY_DIR}/make_face.cmake if (LINK_RESOURCES) include(CMakeRC) - message ("-- Downloading swagger html assets...") + message("-- Downloading swagger html assets...") execute_process( COMMAND "${CMAKE_COMMAND}" -P "${PROJECT_BINARY_DIR}/make_swagger.cmake" RESULT_VARIABLE ret @@ -81,7 +81,7 @@ if (LINK_RESOURCES) if (NOT "${ret}" STREQUAL "0") message(FATAL_ERROR "Could not prepare 'swagger' files. Reason: ${err}") endif() - message ("-- Downloading face html assets...") + message("-- Downloading face html assets...") execute_process( COMMAND "${CMAKE_COMMAND}" -P "${PROJECT_BINARY_DIR}/make_face.cmake" RESULT_VARIABLE ret @@ -134,7 +134,7 @@ add_custom_target(swagger COMMAND "${CMAKE_COMMAND}" -P "${PROJECT_BINARY_DIR}/m if (NOT WIN32) if (GO_BUILTIN_SERVER_EXPORT_PKG_PATH AND NOT IS_ABSOLUTE ${GO_BUILTIN_SERVER_EXPORT_PKG_PATH}) set (GO_BUILTIN_SERVER_EXPORT_PKG_PATH "${CMAKE_CURRENT_SOURCE_DIR}/${GO_BUILTIN_SERVER_EXPORT_PKG_PATH}") - endif () + endif() if (GO_BUILTIN_SERVER_EXPORT_PKG_PATH AND EXISTS "${GO_BUILTIN_SERVER_EXPORT_PKG_PATH}/posix_config.go.in") ProcessorCount (cgo_proc_count) @@ -151,7 +151,7 @@ if (NOT WIN32) unset (cgo_cxx_flags) unset (cgo_c_flags) unset (cgo_ld_flags) - endif () + endif() install(FILES "${PROJECT_SOURCE_DIR}/server.h" @@ -201,5 +201,5 @@ else() @ONLY ) unset (cgo_ld_flags) - endif () -endif () + endif() +endif() diff --git a/cpp_src/server/config.cc b/cpp_src/server/config.cc index 53c42bd6f..ae1e94172 100644 --- a/cpp_src/server/config.cc +++ b/cpp_src/server/config.cc @@ -54,9 +54,6 @@ void ServerConfig::Reset() { AllocatorCachePart = -1; } -const std::string ServerConfig::kDedicatedThreading = "dedicated"; -const std::string ServerConfig::kSharedThreading = "shared"; - reindexer::Error ServerConfig::ParseYaml(const std::string &yaml) { Error err; try { diff --git a/cpp_src/server/config.h b/cpp_src/server/config.h index 70c100a32..02089a1e3 100644 --- a/cpp_src/server/config.h +++ b/cpp_src/server/config.h @@ -75,8 +75,8 @@ struct ServerConfig { int64_t AllocatorCacheLimit; float AllocatorCachePart; - static const std::string kDedicatedThreading; - static const std::string kSharedThreading; + constexpr static std::string_view kDedicatedThreading = "dedicated"; + constexpr static std::string_view kSharedThreading = "shared"; protected: Error fromYaml(YAML::Node& root); diff --git a/cpp_src/server/contrib/CMakeLists.txt b/cpp_src/server/contrib/CMakeLists.txt index 736f9d519..a99a3588a 100644 --- a/cpp_src/server/contrib/CMakeLists.txt +++ 
b/cpp_src/server/contrib/CMakeLists.txt @@ -42,4 +42,4 @@ if(python3) ) add_custom_target(query_json ALL DEPENDS ${QUERY_SCHEMA}) endif() -endif () +endif() diff --git a/cpp_src/server/contrib/server.md b/cpp_src/server/contrib/server.md index b4ea141a7..c47515f23 100644 --- a/cpp_src/server/contrib/server.md +++ b/cpp_src/server/contrib/server.md @@ -2717,7 +2717,7 @@ List of meta info of the specified namespace |**lazyload**
*optional*|Enable namespace lazy load (namespace should be loaded from disk on first call, not at reindexer startup)|boolean| |**log_level**
*optional*|Log level of queries core logger|enum (none, error, warning, info, trace)| |**max_iterations_idset_preresult**
*optional*|Maximum number of IdSet iterations of namespace preliminary result size for optimization
**Minimum value** : `201`
**Maximum value** : `2147483647`|integer| -|**max_preselect_part**
*optional*|Maximum preselect part of namespace's items for optimization of inner join by injection of filters. If max_preselect_part is 0, then only mmax_preselect_size will be used. If max_preselect_size is 0 and max_preselect_part is 0, optimization with preselect will not be applied. If max_preselect_size is 0 and max_preselect_part is 1.0, then the optimization will always be applied
**Default** : `0.1`
**Minimum value** : `0`
**Maximum value** : `1`|number (float)| +|**max_preselect_part**
*optional*|Maximum preselect part of namespace's items for optimization of inner join by injection of filters. If max_preselect_part is 0, then only max_preselect_size will be used. If max_preselect_size is 0 and max_preselect_part is 0, optimization with preselect will not be applied. If max_preselect_size is 0 and max_preselect_part is 1.0, then the optimization will always be applied
**Default** : `0.1`
**Minimum value** : `0`
**Maximum value** : `1`|number (float)| |**max_preselect_size**
*optional*|Maximum preselect size for optimization of inner join by injection of filters. If max_preselect_size is 0, then only max_preselect_part will be used. If max_preselect_size is 0 and max_preselect_part is 0, optimization with preselect will not be applied. If max_preselect_size is 0 and max_preselect_part is 1.0, then the optimization will always be applied
**Minimum value** : `0`|integer| |**min_preselect_size**
*optional*|Minimum preselect size for optimization of inner join by injection of filters. Min_preselect_size will be used as preselect limit if (max_preselect_part * ns.size) is less than this value
**Minimum value** : `0`|integer| |**namespace**
*optional*|Name of namespace, or `*` for setting to all namespaces|string| diff --git a/cpp_src/server/contrib/server.yml b/cpp_src/server/contrib/server.yml index 777dcab22..a472f64cb 100644 --- a/cpp_src/server/contrib/server.yml +++ b/cpp_src/server/contrib/server.yml @@ -4090,7 +4090,7 @@ definitions: default: 0.1 minimum: 0.0 maximum: 1.0 - description: "Maximum preselect part of namespace's items for optimization of inner join by injection of filters. If max_preselect_part is 0, then only mmax_preselect_size will be used. If max_preselect_size is 0 and max_preselect_part is 0, optimization with preselect will not be applied. If max_preselect_size is 0 and max_preselect_part is 1.0, then the optimization will always be applied" + description: "Maximum preselect part of namespace's items for optimization of inner join by injection of filters. If max_preselect_part is 0, then only max_preselect_size will be used. If max_preselect_size is 0 and max_preselect_part is 0, optimization with preselect will not be applied. If max_preselect_size is 0 and max_preselect_part is 1.0, then the optimization will always be applied" min_preselect_size: type: integer default: 1000 diff --git a/cpp_src/server/grpc/CMakeLists.txt b/cpp_src/server/grpc/CMakeLists.txt index 2448e79ca..1b88fd1ce 100644 --- a/cpp_src/server/grpc/CMakeLists.txt +++ b/cpp_src/server/grpc/CMakeLists.txt @@ -62,9 +62,9 @@ file(GLOB GRPC_SRCS ./*.cc ${GENERATED_PROTO_DIR}/*.cc) if (APPLE) set (CMAKE_SHARED_LIBRARY_CREATE_CXX_FLAGS "${CMAKE_SHARED_LIBRARY_CREATE_CXX_FLAGS} -undefined dynamic_lookup") -elseif (CMAKE_BUILD_TYPE STREQUAL "RelWithDebInfo") +elseif(CMAKE_BUILD_TYPE STREQUAL "RelWithDebInfo") set (CMAKE_SHARED_LIBRARY_CREATE_CXX_FLAGS "${CMAKE_SHARED_LIBRARY_CREATE_CXX_FLAGS} -gz") -endif () +endif() add_library(${TARGET} SHARED ${GRPC_SRCS}) include_directories(${GENERATED_PROTO_DIR}) diff --git a/cpp_src/server/resources_wrapper.cc b/cpp_src/server/resources_wrapper.cc index ea5e58396..0e6e86442 100644 --- a/cpp_src/server/resources_wrapper.cc +++ b/cpp_src/server/resources_wrapper.cc @@ -12,7 +12,7 @@ DocumentStatus web::fsStatus(const std::string& target) { status.fstatus = reindexer::fs::Stat(webRoot_ + target); if (status.fstatus == reindexer::fs::StatError) { using reindexer::net::http::kGzSuffix; - status.fstatus = reindexer::fs::Stat(webRoot_ + target + kGzSuffix); + status.fstatus = reindexer::fs::Stat(std::string(webRoot_).append(target).append(kGzSuffix)); if (status.fstatus == reindexer::fs::StatFile) { status.isGzip = true; } @@ -29,7 +29,7 @@ DocumentStatus web::stat(const std::string& target) { if (table.find(target) != table.end()) { return reindexer::fs::StatFile; - } else if (table.find(target + kGzSuffix) != table.end()) { + } else if (table.find(std::string(target).append(kGzSuffix)) != table.end()) { return {reindexer::fs::StatFile, true}; } @@ -50,13 +50,13 @@ int web::file(Context& ctx, HttpStatusCode code, const std::string& target, bool using reindexer::net::http::kGzSuffix; const auto& table = cmrc::detail::table_instance(); - auto it = table.find(isGzip ? target + kGzSuffix : target); + auto it = table.find(isGzip ? std::string(target).append(kGzSuffix) : target); if (it == table.end()) { return ctx.String(reindexer::net::http::StatusNotFound, "File not found"); } - auto file_entry = cmrc::open(isGzip ? target + kGzSuffix : target); + auto file_entry = cmrc::open(isGzip ? 
std::string(target).append(kGzSuffix) : target); std::string_view slice(file_entry.begin(), std::distance(file_entry.begin(), file_entry.end())); return ctx.File(code, target, slice, isGzip, withCache); } diff --git a/cpp_src/tools/assertrx.h b/cpp_src/tools/assertrx.h index 32233c3fc..984245830 100644 --- a/cpp_src/tools/assertrx.h +++ b/cpp_src/tools/assertrx.h @@ -4,6 +4,10 @@ namespace reindexer { +[[noreturn]] void fail_throwrx(const char *assertion, const char *file, unsigned line, const char *function); + +#define throw_assert(expr) reindexer::fail_throwrx(#expr, __FILE__, __LINE__, __FUNCTION__) + #ifdef NDEBUG #define assertrx(e) ((void)0) #define assertrx_throw(e) ((void)0) @@ -13,12 +17,10 @@ namespace reindexer { // fail_assertrx can actually throw, but this exception can not be handled properly, // so it was marked as 'noexcept' for the optimization purposes [[noreturn]] void fail_assertrx(const char *assertion, const char *file, unsigned line, const char *function) noexcept; -[[noreturn]] void fail_throwrx(const char *assertion, const char *file, unsigned line, const char *function); #ifdef __cplusplus #define assertrx(expr) (rx_likely(static_cast(expr)) ? void(0) : reindexer::fail_assertrx(#expr, __FILE__, __LINE__, __FUNCTION__)) -#define assertrx_throw(expr) \ - (rx_likely(static_cast(expr)) ? void(0) : reindexer::fail_throwrx(#expr, __FILE__, __LINE__, __FUNCTION__)) +#define assertrx_throw(expr) (rx_likely(static_cast(expr)) ? void(0) : throw_assert(expr)) #endif // __cplusplus #ifndef RX_WITH_STDLIB_DEBUG diff --git a/cpp_src/tools/errors.h b/cpp_src/tools/errors.h index 10fea100a..bf545bb12 100644 --- a/cpp_src/tools/errors.h +++ b/cpp_src/tools/errors.h @@ -98,7 +98,7 @@ class Error { // TODO: Enable nodiscard once python binding will be updated #ifdef REINDEX_CORE_BUILD template - Error(ErrorCode code, const char *fmt, const Args &...args) noexcept : code_{code} { + RX_NO_INLINE Error(ErrorCode code, const char *fmt, const Args &...args) noexcept : code_{code} { if (code_ != errOK) { try { try { diff --git a/cpp_src/tools/frozen_str_tools.h b/cpp_src/tools/frozen_str_tools.h new file mode 100644 index 000000000..29075237b --- /dev/null +++ b/cpp_src/tools/frozen_str_tools.h @@ -0,0 +1,36 @@ +#pragma once + +#include +#include "stringstools.h" +#include "vendor/frozen/bits/hash_string.h" +#include "vendor/frozen/string.h" + +namespace frozen { + +template +constexpr std::size_t hash_ascii_string_nocase(const String& value, std::size_t seed) { + std::size_t d = (0x811c9dc5 ^ seed) * static_cast(0x01000193); + for (const auto& c : value) d = (d ^ (static_cast(c) | 0x20)) * static_cast(0x01000193); + return d >> 8; +} + +struct nocase_hash_str { + constexpr std::size_t operator()(std::string_view hs, std::size_t seed) const noexcept { return hash_ascii_string_nocase(hs, seed); } + constexpr std::size_t operator()(const frozen::string& hs, std::size_t seed) const noexcept { + return hash_ascii_string_nocase(hs, seed); + } +}; + +struct nocase_equal_str { + constexpr bool operator()(std::string_view lhs, std::string_view rhs) const noexcept { return reindexer::iequals(lhs, rhs); } + constexpr bool operator()(const frozen::string& lhs, std::string_view rhs) const noexcept { + return reindexer::iequals(std::string_view(lhs.data(), lhs.size()), rhs); + } + constexpr bool operator()(std::string_view lhs, const frozen::string& rhs) const noexcept { + return reindexer::iequals(lhs, std::string_view(rhs.data(), rhs.size())); + } + constexpr bool operator()(const frozen::string& 
lhs, const frozen::string& rhs) const noexcept { + return reindexer::iequals(std::string_view(lhs.data(), lhs.size()), std::string_view(rhs.data(), rhs.size())); + } +}; +} // namespace frozen diff --git a/cpp_src/tools/json2kv.cc b/cpp_src/tools/json2kv.cc index e7a7f1a15..74075cc44 100644 --- a/cpp_src/tools/json2kv.cc +++ b/cpp_src/tools/json2kv.cc @@ -77,6 +77,8 @@ Variant jsonValue2Variant(const gason::JsonValue &v, KeyValueType t, std::string } return Variant(variants); } + default: + throw Error(errLogic, "Error parsing json field '%s' - got unexpected tag: %d", fieldName, v.getTag()); } return Variant(); } diff --git a/cpp_src/tools/jsontools.cc b/cpp_src/tools/jsontools.cc index ef87e5f5a..33b3031f1 100644 --- a/cpp_src/tools/jsontools.cc +++ b/cpp_src/tools/jsontools.cc @@ -67,6 +67,11 @@ void jsonValueToString(gason::JsonValue o, WrSerializer &ser, int shift, int ind case gason::JSON_NULL: ser << "null"sv; break; + default: + // Do nothing for JSON_EMPTY + if (uint8_t(o.getTag()) != gason::JSON_EMPTY) { + throw Error(errLogic, "Unexpected json tag: %d", int(o.getTag())); + } } } diff --git a/cpp_src/tools/serializer.cc b/cpp_src/tools/serializer.cc index 934b30ddc..c7016b7c9 100644 --- a/cpp_src/tools/serializer.cc +++ b/cpp_src/tools/serializer.cc @@ -24,7 +24,7 @@ p_string Serializer::GetPSlice() { throw Error(errParseBin, "Binary buffer broken - %s failed: pos=%d,len=%d", type, pos_, len_); } -[[noreturn]] void Serializer::throwUnknowTypeError(std::string_view type) { +[[noreturn]] void Serializer::throwUnknownTypeError(std::string_view type) { throw Error(errParseBin, "Unknown type %s while parsing binary buffer", type); } @@ -62,7 +62,7 @@ void WrSerializer::VStringHelper::End() { if (ser_) { int size = ser_->len_ - pos_; if (size < 0) { - throw Error(errParseBin, "Size of object is unexpedetly negative: %d", size); + throw Error(errParseBin, "Size of object is unexpectedly negative: %d", size); } if (size == 0) { ser_->grow(1); diff --git a/cpp_src/tools/serializer.h b/cpp_src/tools/serializer.h index 91d57bb3e..8ad656da4 100644 --- a/cpp_src/tools/serializer.h +++ b/cpp_src/tools/serializer.h @@ -20,12 +20,15 @@ struct p_string; struct v_string_hdr; class chunk; +constexpr auto kTrueSV = std::string_view("true"); +constexpr auto kFalseSV = std::string_view("false"); + class Serializer { public: Serializer(const void *buf, size_t len) noexcept : buf_(static_cast(buf)), len_(len), pos_(0) {} - Serializer(std::string_view buf) noexcept : buf_(reinterpret_cast(buf.data())), len_(buf.length()), pos_(0) {} - bool Eof() const noexcept { return pos_ >= len_; } - [[nodiscard]] KeyValueType GetKeyValueType() { return KeyValueType::fromNumber(GetVarUint()); } + explicit Serializer(std::string_view buf) noexcept : buf_(reinterpret_cast(buf.data())), len_(buf.length()), pos_(0) {} + RX_ALWAYS_INLINE bool Eof() const noexcept { return pos_ >= len_; } + [[nodiscard]] RX_ALWAYS_INLINE KeyValueType GetKeyValueType() { return KeyValueType::fromNumber(GetVarUint()); } [[nodiscard]] Variant GetVariant() { const KeyValueType type = GetKeyValueType(); if (type.Is()) { @@ -40,7 +43,7 @@ class Serializer { return GetRawVariant(type); } } - [[nodiscard]] Variant GetRawVariant(KeyValueType type) { + [[nodiscard]] RX_ALWAYS_INLINE Variant GetRawVariant(KeyValueType type) { return type.EvaluateOneOf( [this](KeyValueType::Int) { return Variant(int(GetVarint())); }, [this](KeyValueType::Bool) { return Variant(bool(GetVarUint())); }, @@ -48,52 +51,52 @@ class Serializer { [this](KeyValueType::Double) { 
return Variant(GetDouble()); }, [this](KeyValueType::String) { return getPVStringVariant(); }, [](KeyValueType::Null) noexcept { return Variant(); }, [this](KeyValueType::Uuid) { return Variant{GetUuid()}; }, [this, &type](OneOf) -> Variant { - throwUnknowTypeError(type.Name()); + throwUnknownTypeError(type.Name()); }); } - void SkipRawVariant(KeyValueType type) { + RX_ALWAYS_INLINE void SkipRawVariant(KeyValueType type) { type.EvaluateOneOf([this](KeyValueType::Int) { GetVarint(); }, [this](KeyValueType::Bool) { GetVarUint(); }, [this](KeyValueType::Int64) { GetVarint(); }, [this](KeyValueType::Double) { GetDouble(); }, [this](KeyValueType::String) { getPVStringPtr(); }, [](KeyValueType::Null) noexcept {}, [this](KeyValueType::Uuid) { GetUuid(); }, [this, &type](OneOf) { - throwUnknowTypeError(type.Name()); + throwUnknownTypeError(type.Name()); }); } - std::string_view GetSlice() { + RX_ALWAYS_INLINE std::string_view GetSlice() { auto l = GetUInt32(); std::string_view b(reinterpret_cast(buf_ + pos_), l); checkbound(pos_, b.size(), len_); pos_ += b.size(); return b; } - uint32_t GetUInt32() { + RX_ALWAYS_INLINE uint32_t GetUInt32() { uint32_t ret; checkbound(pos_, sizeof(ret), len_); memcpy(&ret, buf_ + pos_, sizeof(ret)); pos_ += sizeof(ret); return ret; } - uint64_t GetUInt64() { + RX_ALWAYS_INLINE uint64_t GetUInt64() { uint64_t ret; checkbound(pos_, sizeof(ret), len_); memcpy(&ret, buf_ + pos_, sizeof(ret)); pos_ += sizeof(ret); return ret; } - double GetDouble() { + RX_ALWAYS_INLINE double GetDouble() { double ret; checkbound(pos_, sizeof(ret), len_); memcpy(&ret, buf_ + pos_, sizeof(ret)); pos_ += sizeof(ret); return ret; } - Uuid GetUuid() { + RX_ALWAYS_INLINE Uuid GetUuid() { const uint64_t v1 = GetUInt64(); const uint64_t v2 = GetUInt64(); return Uuid{v1, v2}; } - int64_t GetVarint() { + RX_ALWAYS_INLINE int64_t GetVarint() { auto l = scan_varint(len_ - pos_, buf_ + pos_); if (l == 0) { using namespace std::string_view_literals; @@ -104,7 +107,7 @@ class Serializer { pos_ += l; return unzigzag64(parse_uint64(l, buf_ + pos_ - l)); } - uint64_t GetVarUint() { // -V1071 + RX_ALWAYS_INLINE uint64_t GetVarUint() { // -V1071 auto l = scan_varint(len_ - pos_, buf_ + pos_); if (l == 0) { using namespace std::string_view_literals; @@ -114,33 +117,33 @@ class Serializer { pos_ += l; return parse_uint64(l, buf_ + pos_ - l); } - [[nodiscard]] ctag GetCTag() { return ctag{GetVarUint()}; } - [[nodiscard]] carraytag GetCArrayTag() { return carraytag{GetUInt32()}; } - std::string_view GetVString() { + [[nodiscard]] RX_ALWAYS_INLINE ctag GetCTag() { return ctag{GetVarUint()}; } + [[nodiscard]] RX_ALWAYS_INLINE carraytag GetCArrayTag() { return carraytag{GetUInt32()}; } + RX_ALWAYS_INLINE std::string_view GetVString() { auto l = GetVarUint(); checkbound(pos_, l, len_); pos_ += l; - return std::string_view(reinterpret_cast(buf_ + pos_ - l), l); + return {reinterpret_cast(buf_ + pos_ - l), std::string_view::size_type(l)}; } p_string GetPVString(); p_string GetPSlice(); - [[nodiscard]] Uuid GetStrUuid() { return Uuid{GetVString()}; } - bool GetBool() { return bool(GetVarUint()); } - size_t Pos() const noexcept { return pos_; } - void SetPos(size_t p) noexcept { pos_ = p; } - const uint8_t *Buf() const noexcept { return buf_; } - size_t Len() const noexcept { return len_; } - void Reset() noexcept { pos_ = 0; } + [[nodiscard]] RX_ALWAYS_INLINE Uuid GetStrUuid() { return Uuid{GetVString()}; } + RX_ALWAYS_INLINE bool GetBool() { return bool(GetVarUint()); } + RX_ALWAYS_INLINE size_t Pos() const noexcept 
{ return pos_; } + RX_ALWAYS_INLINE void SetPos(size_t p) noexcept { pos_ = p; } + RX_ALWAYS_INLINE const uint8_t *Buf() const noexcept { return buf_; } + RX_ALWAYS_INLINE size_t Len() const noexcept { return len_; } + RX_ALWAYS_INLINE void Reset() noexcept { pos_ = 0; } private: - void checkbound(uint64_t pos, uint64_t need, uint64_t len) { + RX_ALWAYS_INLINE void checkbound(uint64_t pos, uint64_t need, uint64_t len) { if (pos + need > len) { throwUnderflowError(pos, need, len); } } [[noreturn]] void throwUnderflowError(uint64_t pos, uint64_t need, uint64_t len); [[noreturn]] void throwScanIntError(std::string_view type); - [[noreturn]] void throwUnknowTypeError(std::string_view type); + [[noreturn]] void throwUnknownTypeError(std::string_view type); Variant getPVStringVariant(); const v_string_hdr *getPVStringPtr(); @@ -153,8 +156,8 @@ class WrSerializer { public: WrSerializer() noexcept : buf_(inBuf_), len_(0), cap_(sizeof(inBuf_)) {} template - WrSerializer(uint8_t (&buf)[N]) noexcept : buf_(buf), len_(0), cap_(N), hasExternalBuf_(true) {} - WrSerializer(chunk &&ch) noexcept : buf_(ch.release()), len_(ch.len()), cap_(ch.capacity()) { + explicit WrSerializer(uint8_t (&buf)[N]) noexcept : buf_(buf), len_(0), cap_(N), hasExternalBuf_(true) {} + explicit WrSerializer(chunk &&ch) noexcept : buf_(ch.release()), len_(ch.len()), cap_(ch.capacity()) { if (!buf_) { buf_ = inBuf_; cap_ = sizeof(inBuf_); @@ -205,7 +208,7 @@ class WrSerializer { } bool HasAllocatedBuffer() const noexcept { return buf_ != inBuf_ && !hasExternalBuf_; } - void PutKeyValueType(KeyValueType t) { PutVarUint(t.toNumber()); } + RX_ALWAYS_INLINE void PutKeyValueType(KeyValueType t) { PutVarUint(t.toNumber()); } void PutVariant(const Variant &kv) { PutKeyValueType(kv.Type()); kv.Type().EvaluateOneOf( @@ -231,7 +234,7 @@ class WrSerializer { } // Put slice with 4 bytes len header - void PutSlice(std::string_view slice) { + RX_ALWAYS_INLINE void PutSlice(std::string_view slice) { PutUInt32(slice.size()); grow(slice.size()); memcpy(&buf_[len_], slice.data(), slice.size()); @@ -290,40 +293,40 @@ class WrSerializer { size_t pos_; }; - SliceHelper StartSlice() { + RX_ALWAYS_INLINE SliceHelper StartSlice() { size_t savePos = len_; PutUInt32(0); - return SliceHelper(this, savePos); + return {this, savePos}; } - VStringHelper StartVString() { + RX_ALWAYS_INLINE VStringHelper StartVString() { size_t savePos = len_; - return VStringHelper(this, savePos); + return {this, savePos}; } // Put raw data - void PutUInt32(uint32_t v) { + RX_ALWAYS_INLINE void PutUInt32(uint32_t v) { grow(sizeof(v)); memcpy(&buf_[len_], &v, sizeof(v)); len_ += sizeof(v); } - void PutCArrayTag(carraytag atag) { PutUInt32(atag.asNumber()); } - void PutUInt64(uint64_t v) { + RX_ALWAYS_INLINE void PutCArrayTag(carraytag atag) { PutUInt32(atag.asNumber()); } + RX_ALWAYS_INLINE void PutUInt64(uint64_t v) { grow(sizeof(v)); memcpy(&buf_[len_], &v, sizeof(v)); len_ += sizeof(v); } - void PutDouble(double v) { + RX_ALWAYS_INLINE void PutDouble(double v) { grow(sizeof(v)); memcpy(&buf_[len_], &v, sizeof(v)); len_ += sizeof(v); } - void PutDoubleStrNoTrailing(double v) { + RX_ALWAYS_INLINE void PutDoubleStrNoTrailing(double v) { grow(32); len_ += double_to_str_no_trailing(v, reinterpret_cast(buf_ + len_), 32); } template ::value>::type * = nullptr> - WrSerializer &operator<<(T k) { + RX_ALWAYS_INLINE WrSerializer &operator<<(T k) { grow(32); char *b = i64toa(k, reinterpret_cast(buf_ + len_)); len_ = b - reinterpret_cast(buf_); @@ -352,7 +355,7 @@ class WrSerializer { } 
WrSerializer &operator<<(bool v) { using namespace std::string_view_literals; - Write(v ? "true"sv : "false"sv); + Write(v ? kTrueSV : kFalseSV); return *this; } WrSerializer &operator<<(double v) { @@ -376,59 +379,59 @@ class WrSerializer { void PrintJsonUuid(Uuid); void PrintHexDump(std::string_view str); - void Fill(char c, size_t count) { + RX_ALWAYS_INLINE void Fill(char c, size_t count) { grow(count); memset(&buf_[len_], c, count); len_ += count; } template > * = nullptr> - void PutVarint(T v) { + RX_ALWAYS_INLINE void PutVarint(T v) { grow(10); len_ += sint64_pack(v, buf_ + len_); } template > * = nullptr> - void PutVarUint(T v) { + RX_ALWAYS_INLINE void PutVarUint(T v) { grow(10); len_ += uint64_pack(v, buf_ + len_); } template > * = nullptr> - void PutVarint(T v) { + RX_ALWAYS_INLINE void PutVarint(T v) { grow(10); len_ += sint32_pack(v, buf_ + len_); } template > * = nullptr> - void PutVarUint(T v) { + RX_ALWAYS_INLINE void PutVarUint(T v) { grow(10); len_ += uint32_pack(v, buf_ + len_); } template > * = nullptr> - void PutVarUint(T v) { + RX_ALWAYS_INLINE void PutVarUint(T v) { assertrx(v >= 0 && v < 128); grow(1); buf_[len_++] = v; } - void PutCTag(ctag tag) { PutVarUint(tag.asNumber()); } - void PutBool(bool v) { + RX_ALWAYS_INLINE void PutCTag(ctag tag) { PutVarUint(tag.asNumber()); } + RX_ALWAYS_INLINE void PutBool(bool v) { grow(1); len_ += boolean_pack(v, buf_ + len_); } - void PutVString(std::string_view str) { + RX_ALWAYS_INLINE void PutVString(std::string_view str) { grow(str.size() + 10); len_ += string_pack(str.data(), str.size(), buf_ + len_); } void PutStrUuid(Uuid); - void PutUuid(Uuid uuid) { + RX_ALWAYS_INLINE void PutUuid(Uuid uuid) { PutUInt64(uuid[0]); PutUInt64(uuid[1]); } // Buffer manipulation functions - void Write(std::string_view slice) { + RX_ALWAYS_INLINE void Write(std::string_view slice) { grow(slice.size()); memcpy(&buf_[len_], slice.data(), slice.size()); len_ += slice.size(); } - uint8_t *Buf() const noexcept { return buf_; } + RX_ALWAYS_INLINE uint8_t *Buf() const noexcept { return buf_; } std::unique_ptr DetachBuf() { std::unique_ptr ret; @@ -471,7 +474,7 @@ class WrSerializer { hasExternalBuf_ = false; } } - std::string_view Slice() const noexcept { return std::string_view(reinterpret_cast(buf_), len_); } + RX_ALWAYS_INLINE std::string_view Slice() const noexcept { return {reinterpret_cast(buf_), len_}; } const char *c_str() noexcept { if (!len_ || buf_[len_] != 0) { grow(1); @@ -481,7 +484,7 @@ class WrSerializer { } protected: - void grow(size_t sz) { + RX_ALWAYS_INLINE void grow(size_t sz) { if (len_ + sz > cap_) { constexpr size_t kPageMask = ~size_t(0xFFF); const auto newCap = ((cap_ * 2) + sz); @@ -489,9 +492,9 @@ class WrSerializer { Reserve((newCap == newCapAligned) ? 
newCap : (newCapAligned + 0x1000)); } } - uint8_t *buf_; - size_t len_; - size_t cap_; + uint8_t *buf_ = nullptr; + size_t len_ = 0; + size_t cap_ = 0; uint8_t inBuf_[0x100]; bool hasExternalBuf_ = false; }; diff --git a/cpp_src/tools/stringstools.cc b/cpp_src/tools/stringstools.cc index df7ada725..6b81c646f 100644 --- a/cpp_src/tools/stringstools.cc +++ b/cpp_src/tools/stringstools.cc @@ -6,13 +6,14 @@ #include #include "atoi/atoi.h" -#include "estl/fast_hash_map.h" +#include "frozen_str_tools.h" #include "itoa/itoa.h" #include "tools/assertrx.h" #include "tools/randomgenerator.h" #include "tools/stringstools.h" #include "utf8cpp/utf8.h" #include "vendor/double-conversion/double-conversion.h" +#include "vendor/frozen/unordered_map.h" namespace reindexer { @@ -118,8 +119,9 @@ inline static char *strappend(char *dst, const char *src) noexcept { return dst; } -static fast_hash_map kStrictModes = { - {"", StrictModeNotSet}, {"none", StrictModeNone}, {"names", StrictModeNames}, {"indexes", StrictModeIndexes}}; +constexpr static auto kStrictModes = frozen::make_unordered_map( + {{"", StrictModeNotSet}, {"none", StrictModeNone}, {"names", StrictModeNames}, {"indexes", StrictModeIndexes}}, + frozen::nocase_hash_str{}, frozen::nocase_equal_str{}); } // namespace stringtools_impl @@ -548,8 +550,9 @@ bool validateUserNsName(std::string_view name) noexcept { } LogLevel logLevelFromString(std::string_view strLogLevel) { - static fast_hash_map levels = { - {"none", LogNone}, {"warning", LogWarning}, {"error", LogError}, {"info", LogInfo}, {"trace", LogTrace}}; + constexpr static auto levels = frozen::make_unordered_map( + {{"none", LogNone}, {"warning", LogWarning}, {"error", LogError}, {"info", LogInfo}, {"trace", LogTrace}}, + frozen::nocase_hash_str{}, frozen::nocase_equal_str{}); auto configLevelIt = levels.find(strLogLevel); if (configLevelIt != levels.end()) { diff --git a/cpp_src/tools/stringstools.h b/cpp_src/tools/stringstools.h index b5940efb0..3837f1f7b 100644 --- a/cpp_src/tools/stringstools.h +++ b/cpp_src/tools/stringstools.h @@ -19,9 +19,9 @@ namespace reindexer { std::string escapeString(std::string_view str); std::string unescapeString(std::string_view str); -[[nodiscard]] RX_ALWAYS_INLINE bool isalpha(char c) noexcept { return (c >= 'A' && c <= 'Z') || (c >= 'a' && c <= 'z'); } -[[nodiscard]] RX_ALWAYS_INLINE bool isdigit(char c) noexcept { return (c >= '0' && c <= '9'); } -[[nodiscard]] RX_ALWAYS_INLINE char tolower(char c) noexcept { return (c >= 'A' && c <= 'Z') ? c + 'a' - 'A' : c; } +[[nodiscard]] RX_ALWAYS_INLINE constexpr bool isalpha(char c) noexcept { return (c >= 'A' && c <= 'Z') || (c >= 'a' && c <= 'z'); } +[[nodiscard]] RX_ALWAYS_INLINE constexpr bool isdigit(char c) noexcept { return (c >= '0' && c <= '9'); } +[[nodiscard]] RX_ALWAYS_INLINE constexpr char tolower(char c) noexcept { return (c >= 'A' && c <= 'Z') ? 
c + 'a' - 'A' : c; } std::string toLower(std::string_view src); inline std::string_view skipSpace(std::string_view str) { size_t i = 0; @@ -134,9 +134,9 @@ template <> case CollateCustom: return collateCompare(lhs, rhs, collateOpts.sortOrderTable); case CollateNone: + default: return collateCompare(lhs, rhs, collateOpts.sortOrderTable); } - return collateCompare(lhs, rhs, collateOpts.sortOrderTable); } std::wstring utf8_to_utf16(std::string_view src); @@ -174,14 +174,14 @@ LogLevel logLevelFromString(std::string_view strLogLevel); StrictMode strictModeFromString(std::string_view strStrictMode); std::string_view strictModeToString(StrictMode mode); -inline bool iequals(std::string_view lhs, std::string_view rhs) noexcept { +inline constexpr bool iequals(std::string_view lhs, std::string_view rhs) noexcept { if (lhs.size() != rhs.size()) return false; for (auto itl = lhs.begin(), itr = rhs.begin(); itl != lhs.end() && itr != rhs.end();) { if (tolower(*itl++) != tolower(*itr++)) return false; } return true; } -inline bool iless(std::string_view lhs, std::string_view rhs) noexcept { +inline constexpr bool iless(std::string_view lhs, std::string_view rhs) noexcept { const auto len = std::min(lhs.size(), rhs.size()); for (size_t i = 0; i < len; ++i) { if (const auto l = tolower(lhs[i]), r = tolower(rhs[i]); l != r) { diff --git a/cpp_src/tools/varint.h b/cpp_src/tools/varint.h index 028c5fdc5..15536ece7 100644 --- a/cpp_src/tools/varint.h +++ b/cpp_src/tools/varint.h @@ -11,6 +11,8 @@ #include #include +#include "estl/defines.h" + /** * Return the ZigZag-encoded 32-bit unsigned integer form of a 32-bit signed * integer. @@ -24,7 +26,7 @@ inline uint32_t zigzag32(int32_t v) noexcept { if (v < 0) return (-(uint32_t)v) * 2 - 1; else - return (uint32_t)(v)*2; + return (uint32_t)(v) * 2; } /** @@ -40,7 +42,7 @@ inline uint64_t zigzag64(int64_t v) noexcept { if (v < 0) return (-(uint64_t)v) * 2 - 1; else - return (uint64_t)(v)*2; + return (uint64_t)(v) * 2; } /** @@ -240,20 +242,20 @@ inline uint64_t parse_uint64(unsigned len, const uint8_t *data) noexcept { return rv; } -inline int64_t unzigzag64(uint64_t v) noexcept { +RX_ALWAYS_INLINE int64_t unzigzag64(uint64_t v) noexcept { if (v & 1) return -(v >> 1) - 1; else return v >> 1; } -inline uint64_t parse_fixed_uint64(const uint8_t *data) noexcept { +RX_ALWAYS_INLINE uint64_t parse_fixed_uint64(const uint8_t *data) noexcept { uint64_t t; memcpy(&t, data, 8); return t; } -inline unsigned scan_varint(unsigned len, const uint8_t *data) noexcept { +RX_ALWAYS_INLINE unsigned scan_varint(unsigned len, const uint8_t *data) noexcept { unsigned i; if (len > 10) len = 10; for (i = 0; i < len; i++) diff --git a/cpp_src/vendor/args/args.hpp b/cpp_src/vendor/args/args.hpp index 027666a73..4f4cde75c 100644 --- a/cpp_src/vendor/args/args.hpp +++ b/cpp_src/vendor/args/args.hpp @@ -535,14 +535,14 @@ namespace args Hidden = HiddenFromUsage | HiddenFromDescription, }; - inline Options operator | (Options lhs, Options rhs) + inline int operator | (Options lhs, Options rhs) { - return static_cast(static_cast(lhs) | static_cast(rhs)); + return static_cast(lhs) | static_cast(rhs); } - inline Options operator & (Options lhs, Options rhs) + inline int operator & (Options lhs, Options rhs) { - return static_cast(static_cast(lhs) & static_cast(rhs)); + return static_cast(lhs) & static_cast(rhs); } class FlagBase; @@ -710,7 +710,7 @@ namespace args class Base { private: - Options options = {}; + int options = static_cast(Options::None); protected: bool matched = false; @@ 
-721,17 +721,18 @@ namespace args #endif public: - Base(const std::string &help_, Options options_ = {}) : options(options_), help(help_) {} + Base(const std::string &help_, Options options_ = {}) : Base(help_, static_cast(options_)) {} + Base(const std::string &help_, int options_) : options(options_), help(help_) {} virtual ~Base() {} - Options GetOptions() const noexcept + int GetOptions() const noexcept { return options; } bool IsRequired() const noexcept { - return (GetOptions() & Options::Required) != Options::None; + return (GetOptions() & static_cast(Options::Required)); } virtual bool Matched() const noexcept @@ -801,18 +802,18 @@ namespace args { if (kickout_) { - options = options | Options::KickOut; + options = options | static_cast(Options::KickOut); } else { - options = static_cast(static_cast(options) & ~static_cast(Options::KickOut)); + options = options & ~static_cast(Options::KickOut); } } /// Gets the kick-out value for building subparsers bool KickOut() const noexcept { - return (options & Options::KickOut) != Options::None; + return (options & static_cast(Options::KickOut)); } virtual void Reset() noexcept @@ -865,7 +866,8 @@ namespace args } public: - NamedBase(const std::string &name_, const std::string &help_, Options options_ = {}) : Base(help_, options_), name(name_) {} + NamedBase(const std::string &name_, const std::string &help_, Options options_ = {}) : NamedBase(name_, help_, static_cast(options_)) {} + NamedBase(const std::string &name_, const std::string &help_, int options_) : Base(help_, options_), name(name_) {} virtual ~NamedBase() {} /** Sets default value string that will be added to argument description. @@ -1041,7 +1043,8 @@ namespace args public: FlagBase(const std::string &name_, const std::string &help_, Matcher &&matcher_, const bool extraError_ = false) : NamedBase(name_, help_, extraError_ ? 
Options::Single : Options()), matcher(std::move(matcher_)) {} - FlagBase(const std::string &name_, const std::string &help_, Matcher &&matcher_, Options options_) : NamedBase(name_, help_, options_), matcher(std::move(matcher_)) {} + FlagBase(const std::string &name_, const std::string &help_, Matcher &&matcher_, Options options_) : FlagBase(name_, help_, std::move(matcher_), static_cast(options_)) {} + FlagBase(const std::string &name_, const std::string &help_, Matcher &&matcher_, int options_) : NamedBase(name_, help_, options_), matcher(std::move(matcher_)) {} virtual ~FlagBase() {} @@ -1049,7 +1052,7 @@ namespace args { if (matcher.Match(flag)) { - if ((GetOptions() & Options::Single) != Options::None && matched) + if ((GetOptions() & static_cast(Options::Single)) && matched) { #ifdef ARGS_NOEXCEPT error = Error::Extra; @@ -1144,7 +1147,8 @@ namespace args { public: ValueFlagBase(const std::string &name_, const std::string &help_, Matcher &&matcher_, const bool extraError_ = false) : FlagBase(name_, help_, std::move(matcher_), extraError_) {} - ValueFlagBase(const std::string &name_, const std::string &help_, Matcher &&matcher_, Options options_) : FlagBase(name_, help_, std::move(matcher_), options_) {} + ValueFlagBase(const std::string &name_, const std::string &help_, Matcher &&matcher_, Options options_) : ValueFlagBase(name_, help_, std::move(matcher_), static_cast(options_)) {} + ValueFlagBase(const std::string &name_, const std::string &help_, Matcher &&matcher_, int options_) : FlagBase(name_, help_, std::move(matcher_), options_) {} virtual ~ValueFlagBase() {} virtual Nargs NumberOfArguments() const noexcept override @@ -1399,7 +1403,7 @@ namespace args for (Base *child: Children()) { - if ((child->GetOptions() & Options::HiddenFromDescription) != Options::None) + if (child->GetOptions() & static_cast(Options::HiddenFromDescription)) { continue; } @@ -1420,7 +1424,7 @@ namespace args std::vector names; for (Base *child: Children()) { - if ((child->GetOptions() & Options::HiddenFromUsage) != Options::None) + if (child->GetOptions() & static_cast(Options::HiddenFromUsage)) { continue; } @@ -1752,7 +1756,7 @@ namespace args for (auto *child: Children()) { - if ((child->GetOptions() & Options::Global) != Options::None) + if (child->GetOptions() & static_cast(Options::Global)) { if (auto *res = child->Match(flag)) { @@ -1783,7 +1787,7 @@ namespace args for (auto *child: Children()) { - if ((child->GetOptions() & Options::Global) != Options::None) + if (child->GetOptions() & static_cast(Options::Global)) { if (auto *res = child->GetNextPositional()) { @@ -1949,7 +1953,7 @@ namespace args for (Base *child: Children()) { - if ((child->GetOptions() & Options::HiddenFromDescription) != Options::None) + if (child->GetOptions() & static_cast(Options::HiddenFromDescription)) { continue; } @@ -2828,12 +2832,20 @@ namespace args public: ActionFlag(Group &group_, const std::string &name_, const std::string &help_, Matcher &&matcher_, Nargs nargs_, std::function &)> action_, Options options_ = {}): + ActionFlag(group_, name_, help_, std::move(matcher_), nargs_, std::move(action_), static_cast(options_)) + { + } + ActionFlag(Group &group_, const std::string &name_, const std::string &help_, Matcher &&matcher_, Nargs nargs_, std::function &)> action_, int options_): FlagBase(name_, help_, std::move(matcher_), options_), action(std::move(action_)), nargs(nargs_) { group_.Add(*this); } ActionFlag(Group &group_, const std::string &name_, const std::string &help_, Matcher &&matcher_, std::function 
action_, Options options_ = {}):
+                ActionFlag(group_, name_, help_, std::move(matcher_), std::move(action_), static_cast<int>(options_))
+            {
+            }
+            ActionFlag(Group &group_, const std::string &name_, const std::string &help_, Matcher &&matcher_, std::function<void(const std::string &)> action_, int options_):
                 FlagBase(name_, help_, std::move(matcher_), options_), nargs(1)
             {
                 group_.Add(*this);
@@ -2841,6 +2853,10 @@ namespace args
             }

             ActionFlag(Group &group_, const std::string &name_, const std::string &help_, Matcher &&matcher_, std::function<void()> action_, Options options_ = {}):
+                ActionFlag(group_, name_, help_, std::move(matcher_), std::move(action_), static_cast<int>(options_))
+            {
+            }
+            ActionFlag(Group &group_, const std::string &name_, const std::string &help_, Matcher &&matcher_, std::function<void()> action_, int options_):
                 FlagBase(name_, help_, std::move(matcher_), options_), nargs(0)
             {
                 group_.Add(*this);
@@ -2916,16 +2932,22 @@ namespace args
         {
         public:
-            ValueFlag(Group &group_, const std::string &name_, const std::string &help_, Matcher &&matcher_, const T &defaultValue_, Options options_): ValueFlagBase(name_, help_, std::move(matcher_), options_), value(defaultValue_), defaultValue(defaultValue_)
+            ValueFlag(Group &group_, const std::string &name_, const std::string &help_, Matcher &&matcher_, const T &defaultValue_, Options options_): ValueFlag(group_, name_, help_, std::move(matcher_), defaultValue_, static_cast<int>(options_))
             {
-                group_.Add(*this);
             }
+            ValueFlag(Group &group_, const std::string &name_, const std::string &help_, Matcher &&matcher_, const T &defaultValue_, int options_): ValueFlagBase(name_, help_, std::move(matcher_), options_), value(defaultValue_), defaultValue(defaultValue_)
+            {
+                group_.Add(*this);
+            }

             ValueFlag(Group &group_, const std::string &name_, const std::string &help_, Matcher &&matcher_, const T &defaultValue_ = T(), const bool extraError_ = false): ValueFlag(group_, name_, help_, std::move(matcher_), defaultValue_, extraError_ ? Options::Single : Options::None)
             {
             }

-            ValueFlag(Group &group_, const std::string &name_, const std::string &help_, Matcher &&matcher_, Options options_): ValueFlag(group_, name_, help_, std::move(matcher_), T(), options_)
+            ValueFlag(Group &group_, const std::string &name_, const std::string &help_, Matcher &&matcher_, Options options_): ValueFlag(group_, name_, help_, std::move(matcher_), static_cast<int>(options_))
+            {
+            }
+            ValueFlag(Group &group_, const std::string &name_, const std::string &help_, Matcher &&matcher_, int options_): ValueFlag(group_, name_, help_, std::move(matcher_), T(), options_)
             {
             }
diff --git a/cpp_src/vendor/frozen/algorithm.h b/cpp_src/vendor/frozen/algorithm.h
new file mode 100644
index 000000000..3abd529b6
--- /dev/null
+++ b/cpp_src/vendor/frozen/algorithm.h
@@ -0,0 +1,198 @@
+/*
+ * Frozen
+ * Copyright 2016 QuarksLab
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.
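The delegating overloads in the args.hxx hunks above exist because OR-combining scoped `Options` values naturally produces a plain integer mask. A minimal standalone sketch of that pattern (hypothetical enum values and names, not the actual args.hxx definitions):

```cpp
#include <iostream>

// Hypothetical stand-ins for args::Options and its combinator.
enum class Options : int { None = 0, Single = 1 << 0, HiddenFromUsage = 1 << 1 };

constexpr int operator|(Options a, Options b) {
    return static_cast<int>(a) | static_cast<int>(b);  // OR of enums -> int mask
}

int main() {
    const int mask = Options::Single | Options::HiddenFromUsage;
    // A constructor taking `int options_` accepts `mask` directly, while the
    // enum overloads delegate to it via static_cast<int>.
    std::cout << ((mask & static_cast<int>(Options::Single)) ? "single\n" : "repeatable\n");
}
```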
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +#ifndef FROZEN_LETITGO_ALGORITHM_H +#define FROZEN_LETITGO_ALGORITHM_H + +#include "frozen/bits/basic_types.h" +#include "frozen/bits/version.h" +#include "frozen/string.h" + +namespace frozen { + +// 'search' implementation if C++17 is not available +// https://en.cppreference.com/w/cpp/algorithm/search +template +ForwardIterator search(ForwardIterator first, ForwardIterator last, const Searcher & searcher) +{ + return searcher(first, last).first; +} + +// text book implementation from +// https://en.wikipedia.org/wiki/Knuth%E2%80%93Morris%E2%80%93Pratt_algorithm + +template class knuth_morris_pratt_searcher { + bits::carray step_; + bits::carray needle_; + + static constexpr bits::carray + build_kmp_cache(char const (&needle)[size + 1]) { + std::ptrdiff_t cnd = 0; + bits::carray cache(-1); + for (std::size_t pos = 1; pos < size; ++pos) { + if (needle[pos] == needle[cnd]) { + cache[pos] = cache[cnd]; + cnd += 1; + } else { + cache[pos] = cnd; + cnd = cache[cnd]; + while (cnd >= 0 && needle[pos] != needle[cnd]) + cnd = cache[cnd]; + cnd += 1; + } + } + return cache; + } + +public: + constexpr knuth_morris_pratt_searcher(char const (&needle)[size + 1]) + : step_{build_kmp_cache(needle)}, needle_(needle) {} + + template + constexpr std::pair operator()(ForwardIterator first, ForwardIterator last) const { + std::size_t i = 0; + ForwardIterator iter = first; + while (iter != last) { + if (needle_[i] == *iter) { + if (i == (size - 1)) + return { iter - i, iter - i + size }; + ++i; + ++iter; + } else { + if (step_[i] > -1) { + i = step_[i]; + } else { + ++iter; + i = 0; + } + } + } + return { last, last }; + } +}; + +template +constexpr knuth_morris_pratt_searcher make_knuth_morris_pratt_searcher(char const (&needle)[N]) { + return {needle}; +} + +// text book implementation from +// https://en.wikipedia.org/wiki/Boyer%E2%80%93Moore%E2%80%93Horspool_algorithm + +template class boyer_moore_searcher { + using skip_table_type = bits::carray; + using suffix_table_type = bits::carray; + + skip_table_type skip_table_; + suffix_table_type suffix_table_; + bits::carray needle_; + + constexpr auto build_skip_table(char const (&needle)[size + 1]) { + skip_table_type skip_table(size); + for (std::size_t i = 0; i < size - 1; ++i) + skip_table[needle[i]] -= i + 1; + return skip_table; + } + + constexpr bool is_prefix(char const (&needle)[size + 1], std::size_t pos) { + std::size_t suffixlen = size - pos; + + for (std::size_t i = 0; i < suffixlen; i++) { + if (needle[i] != needle[pos + i]) + return false; + } + return true; + } + + constexpr std::size_t suffix_length(char const (&needle)[size + 1], + std::size_t pos) { + // increment suffix length slen to the first mismatch or beginning + // of the word + for (std::size_t slen = 0; slen < pos ; slen++) + if (needle[pos - slen] != needle[size - 1 - slen]) + return slen; + + return pos; + } + + constexpr auto build_suffix_table(char const (&needle)[size + 1]) { + suffix_table_type suffix; + std::ptrdiff_t last_prefix_index = size - 1; + + // first loop + for (std::ptrdiff_t p = size - 1; p >= 0; p--) { + if (is_prefix(needle, p + 1)) + last_prefix_index = p + 1; + + suffix[p] = last_prefix_index + (size - 1 - p); + } + + // second loop + for (std::size_t p = 0; p < size - 1; p++) { + auto slen = suffix_length(needle, p); + if (needle[p - slen] != needle[size - 1 - slen]) + suffix[size - 1 - slen] = size - 1 - p + slen; + + } + return suffix; + } + 
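A usage sketch for the KMP searcher defined above, assuming the angle-bracketed template parameters stripped from this listing are restored as in upstream frozen (`knuth_morris_pratt_searcher<size>` backed by `bits::carray`s):

```cpp
#include <frozen/algorithm.h>
#include <cassert>

int main() {
    // The needle's failure table is built in the constexpr constructor.
    constexpr auto kmp = frozen::make_knuth_morris_pratt_searcher("ana");
    const char haystack[] = "bananas";
    // frozen::search adapts the searcher's pair interface to a first-match iterator.
    const auto hit = frozen::search(haystack, haystack + sizeof(haystack) - 1, kmp);
    assert(hit - haystack == 1);  // "ana" first occurs at index 1
}
```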
+public: + constexpr boyer_moore_searcher(char const (&needle)[size + 1]) + : skip_table_{build_skip_table(needle)}, + suffix_table_{build_suffix_table(needle)}, + needle_(needle) {} + + template + constexpr std::pair operator()(RandomAccessIterator first, RandomAccessIterator last) const { + if (size == 0) + return { first, first }; + + if (size > size_t(last - first)) + return { last, last }; + + RandomAccessIterator iter = first + size - 1; + while (true) { + std::ptrdiff_t j = size - 1; + while (j > 0 && (*iter == needle_[j])) { + --iter; + --j; + } + if (j == 0 && *iter == needle_[0]) + return { iter, iter + size}; + + std::ptrdiff_t jump = std::max(skip_table_[*iter], suffix_table_[j]); + if (jump >= last - iter) + return { last, last }; + iter += jump; + } + } +}; + +template +constexpr boyer_moore_searcher make_boyer_moore_searcher(char const (&needle)[N]) { + return {needle}; +} + +} // namespace frozen + +#endif diff --git a/cpp_src/vendor/frozen/bits/algorithms.h b/cpp_src/vendor/frozen/bits/algorithms.h new file mode 100644 index 000000000..4efa61b21 --- /dev/null +++ b/cpp_src/vendor/frozen/bits/algorithms.h @@ -0,0 +1,235 @@ +/* + * Frozen + * Copyright 2016 QuarksLab + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
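And the matching sketch for the Boyer-Moore(-Horspool) searcher just defined; it requires random-access iterators, which plain `char` pointers satisfy (same caveat about the stripped template arguments):

```cpp
#include <frozen/algorithm.h>
#include <cassert>
#include <cstring>

int main() {
    constexpr auto bm = frozen::make_boyer_moore_searcher("needle");
    const char *text = "this is a needle in a haystack";
    const auto hit = frozen::search(text, text + std::strlen(text), bm);
    assert(hit - text == 10);  // "needle" starts at index 10
}
```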
+ */ + +#ifndef FROZEN_LETITGO_BITS_ALGORITHMS_H +#define FROZEN_LETITGO_BITS_ALGORITHMS_H + +#include "frozen/bits/basic_types.h" + +#include +#include + +namespace frozen { + +namespace bits { + +auto constexpr next_highest_power_of_two(std::size_t v) { + // https://graphics.stanford.edu/~seander/bithacks.html#RoundUpPowerOf2 + constexpr auto trip_count = std::numeric_limits::digits; + v--; + for(std::size_t i = 1; i < trip_count; i <<= 1) + v |= v >> i; + v++; + return v; +} + +template +auto constexpr log(T v) { + std::size_t n = 0; + while (v > 1) { + n += 1; + v >>= 1; + } + return n; +} + +constexpr std::size_t bit_weight(std::size_t n) { + return (n <= 8*sizeof(unsigned int)) + + (n <= 8*sizeof(unsigned long)) + + (n <= 8*sizeof(unsigned long long)) + + (n <= 128); +} + +unsigned int select_uint_least(std::integral_constant); +unsigned long select_uint_least(std::integral_constant); +unsigned long long select_uint_least(std::integral_constant); +template +unsigned long long select_uint_least(std::integral_constant) { + static_assert(N < 2, "unsupported type size"); + return {}; +} + + +template +using select_uint_least_t = decltype(select_uint_least(std::integral_constant())); + +template +constexpr auto min_element(Iter begin, const Iter end, + Compare const &compare) { + auto result = begin; + while (begin != end) { + if (compare(*begin, *result)) { + result = begin; + } + ++begin; + } + return result; +} + +template +constexpr void cswap(T &a, T &b) { + auto tmp = a; + a = b; + b = tmp; +} + +template +constexpr void cswap(std::pair & a, std::pair & b) { + cswap(a.first, b.first); + cswap(a.second, b.second); +} + +template +constexpr void cswap(std::tuple &a, std::tuple &b, std::index_sequence) { + using swallow = int[]; + (void) swallow{(cswap(std::get(a), std::get(b)), 0)...}; +} + +template +constexpr void cswap(std::tuple &a, std::tuple &b) { + cswap(a, b, std::make_index_sequence()); +} + +template +constexpr void iter_swap(Iter a, Iter b) { + cswap(*a, *b); +} + +template +constexpr Iterator partition(Iterator left, Iterator right, Compare const &compare) { + auto pivot = left + (right - left) / 2; + iter_swap(right, pivot); + pivot = right; + for (auto it = left; 0 < right - it; ++it) { + if (compare(*it, *pivot)) { + iter_swap(it, left); + left++; + } + } + iter_swap(pivot, left); + pivot = left; + return pivot; +} + +template +constexpr void quicksort(Iterator left, Iterator right, Compare const &compare) { + while (0 < right - left) { + auto new_pivot = bits::partition(left, right, compare); + quicksort(left, new_pivot, compare); + left = new_pivot + 1; + } +} + +template +constexpr Container quicksort(Container const &array, + Compare const &compare) { + Container res = array; + quicksort(res.begin(), res.end() - 1, compare); + return res; +} + +template struct LowerBound { + T const &value_; + Compare const &compare_; + constexpr LowerBound(T const &value, Compare const &compare) + : value_(value), compare_(compare) {} + + template + inline constexpr ForwardIt doit_fast(ForwardIt first, + std::integral_constant) { + return first; + } + + template + inline constexpr ForwardIt doit_fast(ForwardIt first, + std::integral_constant) { + auto constexpr step = N / 2; + static_assert(N/2 == N - N / 2 - 1, "power of two minus 1"); + auto it = first + step; + auto next_it = compare_(*it, value_) ? 
it + 1 : first; + return doit_fast(next_it, std::integral_constant{}); + } + + template + inline constexpr ForwardIt doitfirst(ForwardIt first, std::integral_constant, std::integral_constant) { + return doit_fast(first, std::integral_constant{}); + } + + template + inline constexpr ForwardIt doitfirst(ForwardIt first, std::integral_constant, std::integral_constant) { + auto constexpr next_power = next_highest_power_of_two(N); + auto constexpr next_start = next_power / 2 - 1; + auto it = first + next_start; + if (compare_(*it, value_)) { + auto constexpr next = N - next_start - 1; + return doitfirst(it + 1, std::integral_constant{}, std::integral_constant{}); + } + else + return doit_fast(first, std::integral_constant{}); + } + + template + inline constexpr ForwardIt doitfirst(ForwardIt first, std::integral_constant, std::integral_constant) { + return doit_fast(first, std::integral_constant{}); + } +}; + +template +constexpr ForwardIt lower_bound(ForwardIt first, const T &value, Compare const &compare) { + return LowerBound{value, compare}.doitfirst(first, std::integral_constant{}, std::integral_constant{}); +} + +template +constexpr bool binary_search(ForwardIt first, const T &value, + Compare const &compare) { + ForwardIt where = lower_bound(first, value, compare); + return (!(where == first + N) && !(compare(value, *where))); +} + + +template +constexpr bool equal(InputIt1 first1, InputIt1 last1, InputIt2 first2) +{ + for (; first1 != last1; ++first1, ++first2) { + if (!(*first1 == *first2)) { + return false; + } + } + return true; +} + +template +constexpr bool lexicographical_compare(InputIt1 first1, InputIt1 last1, InputIt2 first2, InputIt2 last2) +{ + for (; (first1 != last1) && (first2 != last2); ++first1, ++first2) { + if (*first1 < *first2) + return true; + if (*first2 < *first1) + return false; + } + return (first1 == last1) && (first2 != last2); +} + +} // namespace bits +} // namespace frozen + +#endif diff --git a/cpp_src/vendor/frozen/bits/basic_types.h b/cpp_src/vendor/frozen/bits/basic_types.h new file mode 100644 index 000000000..239270afc --- /dev/null +++ b/cpp_src/vendor/frozen/bits/basic_types.h @@ -0,0 +1,198 @@ +/* + * Frozen + * Copyright 2016 QuarksLab + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
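The `lower_bound`/`binary_search` helpers above take the range length `N` as a template argument and only a begin iterator. A sketch of the calling convention, assuming the upstream signatures `binary_search<N>(first, value, compare)`:

```cpp
#include <frozen/bits/algorithms.h>
#include <functional>

int main() {
    constexpr int sorted[] = {1, 3, 5, 7, 11};
    // N = 5 is the range length; there is no `last` iterator.
    static_assert(frozen::bits::binary_search<5>(sorted, 7, std::less<int>{}),
                  "7 is present");
    static_assert(!frozen::bits::binary_search<5>(sorted, 4, std::less<int>{}),
                  "4 is absent");
}
```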
+ */ + +#ifndef FROZEN_LETITGO_BASIC_TYPES_H +#define FROZEN_LETITGO_BASIC_TYPES_H + +#include "frozen/bits/exceptions.h" + +#include +#include +#include +#include + +namespace frozen { + +namespace bits { + +// used as a fake argument for frozen::make_set and frozen::make_map in the case of N=0 +struct ignored_arg {}; + +template +class cvector { + T data [N] = {}; // zero-initialization for scalar type T, default-initialized otherwise + std::size_t dsize = 0; + +public: + // Container typdefs + using value_type = T; + using reference = value_type &; + using const_reference = const value_type &; + using pointer = value_type *; + using const_pointer = const value_type *; + using iterator = pointer; + using const_iterator = const_pointer; + using size_type = std::size_t; + using difference_type = std::ptrdiff_t; + + // Constructors + constexpr cvector(void) = default; + constexpr cvector(size_type count, const T& value) : dsize(count) { + for (std::size_t i = 0; i < N; ++i) + data[i] = value; + } + + // Iterators + constexpr iterator begin() noexcept { return data; } + constexpr iterator end() noexcept { return data + dsize; } + constexpr const_iterator begin() const noexcept { return data; } + constexpr const_iterator end() const noexcept { return data + dsize; } + + // Capacity + constexpr size_type size() const { return dsize; } + + // Element access + constexpr reference operator[](std::size_t index) { return data[index]; } + constexpr const_reference operator[](std::size_t index) const { return data[index]; } + + constexpr reference back() { return data[dsize - 1]; } + constexpr const_reference back() const { return data[dsize - 1]; } + + // Modifiers + constexpr void push_back(const T & a) { data[dsize++] = a; } + constexpr void push_back(T && a) { data[dsize++] = std::move(a); } + constexpr void pop_back() { --dsize; } + + constexpr void clear() { dsize = 0; } +}; + +template +class carray { + T data_ [N] = {}; // zero-initialization for scalar type T, default-initialized otherwise + + template + constexpr carray(Iter iter, std::index_sequence) + : data_{((void)I, *iter++)...} {} + template + constexpr carray(const T& value, std::index_sequence) + : data_{((void)I, value)...} {} + +public: + // Container typdefs + using value_type = T; + using reference = value_type &; + using const_reference = const value_type &; + using pointer = value_type *; + using const_pointer = const value_type *; + using iterator = pointer; + using const_iterator = const_pointer; + using size_type = std::size_t; + using difference_type = std::ptrdiff_t; + + // Constructors + constexpr carray() = default; + constexpr carray(const value_type& val) + : carray(val, std::make_index_sequence()) {} + template ::value, std::size_t> M> + constexpr carray(U const (&init)[M]) + : carray(init, std::make_index_sequence()) + { + static_assert(M >= N, "Cannot initialize a carray with an smaller array"); + } + template ::value, std::size_t> M> + constexpr carray(std::array const &init) + : carray(init.begin(), std::make_index_sequence()) + { + static_assert(M >= N, "Cannot initialize a carray with an smaller array"); + } + template ::value>* = nullptr> + constexpr carray(std::initializer_list init) + : carray(init.begin(), std::make_index_sequence()) + { + // clang & gcc doesn't recognize init.size() as a constexpr + // static_assert(init.size() >= N, "Cannot initialize a carray with an smaller initializer list"); + } + template ::value>* = nullptr> + constexpr carray(const carray& rhs) + : carray(rhs.begin(), 
std::make_index_sequence()) + { + } + + // Iterators + constexpr iterator begin() noexcept { return data_; } + constexpr const_iterator begin() const noexcept { return data_; } + constexpr iterator end() noexcept { return data_ + N; } + constexpr const_iterator end() const noexcept { return data_ + N; } + + // Capacity + constexpr size_type size() const { return N; } + constexpr size_type max_size() const { return N; } + + // Element access + constexpr reference operator[](std::size_t index) { return data_[index]; } + constexpr const_reference operator[](std::size_t index) const { return data_[index]; } + + constexpr reference at(std::size_t index) { + if (index > N) + FROZEN_THROW_OR_ABORT(std::out_of_range("Index (" + std::to_string(index) + ") out of bound (" + std::to_string(N) + ')')); + return data_[index]; + } + constexpr const_reference at(std::size_t index) const { + if (index > N) + FROZEN_THROW_OR_ABORT(std::out_of_range("Index (" + std::to_string(index) + ") out of bound (" + std::to_string(N) + ')')); + return data_[index]; + } + + constexpr reference front() { return data_[0]; } + constexpr const_reference front() const { return data_[0]; } + + constexpr reference back() { return data_[N - 1]; } + constexpr const_reference back() const { return data_[N - 1]; } + + constexpr value_type* data() noexcept { return data_; } + constexpr const value_type* data() const noexcept { return data_; } +}; +template +class carray { + +public: + // Container typdefs + using value_type = T; + using reference = value_type &; + using const_reference = const value_type &; + using pointer = value_type *; + using const_pointer = const value_type *; + using iterator = pointer; + using const_iterator = const_pointer; + using size_type = std::size_t; + using difference_type = std::ptrdiff_t; + + // Constructors + constexpr carray(void) = default; + +}; + +} // namespace bits + +} // namespace frozen + +#endif diff --git a/cpp_src/vendor/frozen/bits/constexpr_assert.h b/cpp_src/vendor/frozen/bits/constexpr_assert.h new file mode 100644 index 000000000..912210dc2 --- /dev/null +++ b/cpp_src/vendor/frozen/bits/constexpr_assert.h @@ -0,0 +1,40 @@ +/* + * Frozen + * Copyright 2016 QuarksLab + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
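`bits::carray` above is the fixed-size constexpr array that backs every frozen container. A small usage sketch, assuming the stripped template parameters read `carray<T, N>` as upstream and the headers' elided `#include`s are restored:

```cpp
#include <frozen/bits/basic_types.h>

int main() {
    constexpr int raw[3] = {10, 20, 30};
    // Construction copies from a (possibly larger) C array at compile time.
    constexpr frozen::bits::carray<int, 3> a(raw);
    static_assert(a.size() == 3, "");
    static_assert(a[1] == 20, "");
    static_assert(a.back() == 30, "");
}
```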
+ */ + +#ifndef FROZEN_LETITGO_CONSTEXPR_ASSERT_H +#define FROZEN_LETITGO_CONSTEXPR_ASSERT_H + +#include + +#ifdef _MSC_VER + +// FIXME: find a way to implement that correctly for msvc +#define constexpr_assert(cond, msg) + +#else + +#define constexpr_assert(cond, msg)\ + assert(cond && msg); +#endif + +#endif + diff --git a/cpp_src/vendor/frozen/bits/defines.h b/cpp_src/vendor/frozen/bits/defines.h new file mode 100644 index 000000000..839f4e833 --- /dev/null +++ b/cpp_src/vendor/frozen/bits/defines.h @@ -0,0 +1,66 @@ +/* + * Frozen + * Copyright 2016 QuarksLab + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +#ifndef FROZEN_LETITGO_DEFINES_H +#define FROZEN_LETITGO_DEFINES_H + +#if defined(_MSVC_LANG) && !(defined(__EDG__) && defined(__clang__)) // TRANSITION, VSO#273681 + #define FROZEN_LETITGO_IS_MSVC +#endif + +// Code taken from https://stackoverflow.com/questions/43639122/which-values-can-msvc-lang-have +#if defined(FROZEN_LETITGO_IS_MSVC) + #if _MSVC_LANG > 201402 + #define FROZEN_LETITGO_HAS_CXX17 1 + #else /* _MSVC_LANG > 201402 */ + #define FROZEN_LETITGO_HAS_CXX17 0 + #endif /* _MSVC_LANG > 201402 */ +#else /* _MSVC_LANG etc. */ + #if __cplusplus > 201402 + #define FROZEN_LETITGO_HAS_CXX17 1 + #else /* __cplusplus > 201402 */ + #define FROZEN_LETITGO_HAS_CXX17 0 + #endif /* __cplusplus > 201402 */ +#endif /* _MSVC_LANG etc. */ +// End if taken code + +#if FROZEN_LETITGO_HAS_CXX17 == 1 && defined(FROZEN_LETITGO_IS_MSVC) + #define FROZEN_LETITGO_HAS_STRING_VIEW // We assume Visual Studio always has string_view in C++17 +#else + #if FROZEN_LETITGO_HAS_CXX17 == 1 && __has_include() + #define FROZEN_LETITGO_HAS_STRING_VIEW + #endif +#endif + +#ifdef __cpp_char8_t + #define FROZEN_LETITGO_HAS_CHAR8T +#endif + +#if defined(__cpp_deduction_guides) && __cpp_deduction_guides >= 201703L + #define FROZEN_LETITGO_HAS_DEDUCTION_GUIDES +#endif + +#if defined(__cpp_lib_constexpr_string) && __cpp_lib_constexpr_string >= 201907L + #define FROZEN_LETITGO_HAS_CONSTEXPR_STRING +#endif + +#endif // FROZEN_LETITGO_DEFINES_H diff --git a/cpp_src/vendor/frozen/bits/elsa.h b/cpp_src/vendor/frozen/bits/elsa.h new file mode 100644 index 000000000..6c9ecb78f --- /dev/null +++ b/cpp_src/vendor/frozen/bits/elsa.h @@ -0,0 +1,57 @@ +/* + * Frozen + * Copyright 2016 QuarksLab + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
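A consumption sketch for the feature macros and `constexpr_assert` defined above. The macro names are exactly the ones defined in defines.h and constexpr_assert.h; the `<cassert>` include mirrors the one elided from the constexpr_assert.h listing:

```cpp
#include <frozen/bits/defines.h>
#include <frozen/bits/constexpr_assert.h>
#include <cassert>

#ifdef FROZEN_LETITGO_HAS_STRING_VIEW
#include <string_view>  // only pulled in when C++17 string_view is usable
#endif

int main() {
    // On MSVC constexpr_assert expands to nothing (see the FIXME above);
    // everywhere else it forwards to assert().
    constexpr_assert(1 + 1 == 2, "sanity");
}
```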
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +#ifndef FROZEN_LETITGO_ELSA_H +#define FROZEN_LETITGO_ELSA_H + +#include + +namespace frozen { + +template struct elsa { + static_assert(std::is_integral::value || std::is_enum::value, + "only supports integral types, specialize for other types"); + + constexpr std::size_t operator()(T const &value, std::size_t seed) const { + std::size_t key = seed ^ static_cast(value); + key = (~key) + (key << 21); // key = (key << 21) - key - 1; + key = key ^ (key >> 24); + key = (key + (key << 3)) + (key << 8); // key * 265 + key = key ^ (key >> 14); + key = (key + (key << 2)) + (key << 4); // key * 21 + key = key ^ (key >> 28); + key = key + (key << 31); + return key; + } +}; + +template <> struct elsa { + template + constexpr std::size_t operator()(T const &value, std::size_t seed) const { + return elsa{}(value, seed); + } +}; + +template using anna = elsa; +} // namespace frozen + +#endif diff --git a/cpp_src/vendor/frozen/bits/elsa_std.h b/cpp_src/vendor/frozen/bits/elsa_std.h new file mode 100644 index 000000000..df1a9cfc3 --- /dev/null +++ b/cpp_src/vendor/frozen/bits/elsa_std.h @@ -0,0 +1,41 @@ +#ifndef FROZEN_LETITGO_BITS_ELSA_STD_H +#define FROZEN_LETITGO_BITS_ELSA_STD_H + +#include "defines.h" +#include "elsa.h" +#include "hash_string.h" + +#ifdef FROZEN_LETITGO_HAS_STRING_VIEW +#include +#endif +#include + +namespace frozen { + +#ifdef FROZEN_LETITGO_HAS_STRING_VIEW + +template struct elsa> +{ + constexpr std::size_t operator()(const std::basic_string_view& value) const { + return hash_string(value); + } + constexpr std::size_t operator()(const std::basic_string_view& value, std::size_t seed) const { + return hash_string(value, seed); + } +}; + +#endif + +template struct elsa> +{ + constexpr std::size_t operator()(const std::basic_string& value) const { + return hash_string(value); + } + constexpr std::size_t operator()(const std::basic_string& value, std::size_t seed) const { + return hash_string(value, seed); + } +}; + +} // namespace frozen + +#endif // FROZEN_LETITGO_BITS_ELSA_STD_H diff --git a/cpp_src/vendor/frozen/bits/exceptions.h b/cpp_src/vendor/frozen/bits/exceptions.h new file mode 100644 index 000000000..b43e3e6b9 --- /dev/null +++ b/cpp_src/vendor/frozen/bits/exceptions.h @@ -0,0 +1,39 @@ +/* + * Frozen + * Copyright 2016 QuarksLab + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
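Usage sketch for the seeded `elsa` hasher above. The integral specialization is a Thomas Wang-style mix of `value ^ seed`; the seed is what lets the pmh construction (later in this diff) retry with a fresh hash when a bucket collides:

```cpp
#include <frozen/bits/elsa.h>
#include <cstddef>
#include <cstdio>

int main() {
    constexpr frozen::elsa<int> hash{};
    // Deterministic for a fixed (value, seed) pair.
    constexpr std::size_t h1 = hash(42, 1);
    constexpr std::size_t h2 = hash(42, 2);
    static_assert(h1 == hash(42, 1), "stable for the same seed");
    std::printf("seed 1 -> %zu, seed 2 -> %zu\n", h1, h2);
}
```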
+ */ + +#ifndef FROZEN_LETITGO_EXCEPTIONS_H +#define FROZEN_LETITGO_EXCEPTIONS_H + +#if defined(FROZEN_NO_EXCEPTIONS) || (defined(_MSC_VER) && !defined(_CPPUNWIND)) || (!defined(_MSC_VER) && !defined(__cpp_exceptions)) + +#include +#define FROZEN_THROW_OR_ABORT(_) std::abort() + +#else + +#include +#define FROZEN_THROW_OR_ABORT(err) throw err + + +#endif + +#endif diff --git a/cpp_src/vendor/frozen/bits/hash_string.h b/cpp_src/vendor/frozen/bits/hash_string.h new file mode 100644 index 000000000..b2f7e90e6 --- /dev/null +++ b/cpp_src/vendor/frozen/bits/hash_string.h @@ -0,0 +1,28 @@ +#ifndef FROZEN_LETITGO_BITS_HASH_STRING_H +#define FROZEN_LETITGO_BITS_HASH_STRING_H + +#include + +namespace frozen { + +template +constexpr std::size_t hash_string(const String& value) { + std::size_t d = 5381; + for (const auto& c : value) + d = d * 33 + static_cast(c); + return d; +} + +// https://en.wikipedia.org/wiki/Fowler%E2%80%93Noll%E2%80%93Vo_hash_function +// With the lowest bits removed, based on experimental setup. +template +constexpr std::size_t hash_string(const String& value, std::size_t seed) { + std::size_t d = (0x811c9dc5 ^ seed) * static_cast(0x01000193); + for (const auto& c : value) + d = (d ^ static_cast(c)) * static_cast(0x01000193); + return d >> 8 ; +} + +} // namespace frozen + +#endif // FROZEN_LETITGO_BITS_HASH_STRING_H \ No newline at end of file diff --git a/cpp_src/vendor/frozen/bits/mpl.h b/cpp_src/vendor/frozen/bits/mpl.h new file mode 100644 index 000000000..8f87f99c8 --- /dev/null +++ b/cpp_src/vendor/frozen/bits/mpl.h @@ -0,0 +1,56 @@ +/* + * Frozen + * Copyright 2022 Giel van Schijndel + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +#ifndef FROZEN_LETITGO_BITS_MPL_H +#define FROZEN_LETITGO_BITS_MPL_H + +#include + +namespace frozen { + +namespace bits { + +// Forward declarations +template +class carray; + +template +struct remove_cv : std::remove_cv {}; + +template +struct remove_cv> { + using type = std::pair::type...>; +}; + +template +struct remove_cv> { + using type = carray::type, N>; +}; + +template +using remove_cv_t = typename remove_cv::type; + +} // namespace bits + +} // namespace frozen + +#endif diff --git a/cpp_src/vendor/frozen/bits/pmh.h b/cpp_src/vendor/frozen/bits/pmh.h new file mode 100644 index 000000000..1bb402163 --- /dev/null +++ b/cpp_src/vendor/frozen/bits/pmh.h @@ -0,0 +1,254 @@ +/* + * Frozen + * Copyright 2016 QuarksLab + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
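The two hashes in hash_string.h above are djb2 (unseeded) and a seeded FNV-1a variant that drops the low bits; both fold any iterable character range, e.g. a `std::string_view`:

```cpp
#include <frozen/bits/hash_string.h>
#include <cstddef>
#include <cstdio>
#include <string_view>

int main() {
    constexpr std::string_view key{"distinct"};
    constexpr std::size_t h_djb2 = frozen::hash_string(key);      // d = d * 33 + c
    constexpr std::size_t h_fnv  = frozen::hash_string(key, 42);  // seeded, >> 8
    std::printf("%zu %zu\n", h_djb2, h_fnv);
}
```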
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +// inspired from http://stevehanov.ca/blog/index.php?id=119 +#ifndef FROZEN_LETITGO_PMH_H +#define FROZEN_LETITGO_PMH_H + +#include "frozen/bits/algorithms.h" +#include "frozen/bits/basic_types.h" + +#include +#include +#include +#include + +namespace frozen { + +namespace bits { + +// Function object for sorting buckets in decreasing order of size +struct bucket_size_compare { + template + bool constexpr operator()(B const &b0, + B const &b1) const { + return b0.size() > b1.size(); + } +}; + +// Step One in pmh routine is to take all items and hash them into buckets, +// with some collisions. Then process those buckets further to build a perfect +// hash function. +// pmh_buckets represents the initial placement into buckets. + +template +struct pmh_buckets { + // Step 0: Bucket max is 2 * sqrt M + // TODO: Come up with justification for this, should it not be O(log M)? + static constexpr auto bucket_max = 2 * (1u << (log(M) / 2)); + + using bucket_t = cvector; + carray buckets; + std::uint64_t seed; + + // Represents a reference to a bucket. This is used because the buckets + // have to be sorted, but buckets are big, making it slower than sorting refs + struct bucket_ref { + unsigned hash; + const bucket_t * ptr; + + // Forward some interface of bucket + using value_type = typename bucket_t::value_type; + using const_iterator = typename bucket_t::const_iterator; + + constexpr auto size() const { return ptr->size(); } + constexpr const auto & operator[](std::size_t idx) const { return (*ptr)[idx]; } + constexpr auto begin() const { return ptr->begin(); } + constexpr auto end() const { return ptr->end(); } + }; + + // Make a bucket_ref for each bucket + template + carray constexpr make_bucket_refs(std::index_sequence) const { + return {{ bucket_ref{Is, &buckets[Is]}... 
}}; + } + + // Makes a bucket_ref for each bucket and sorts them by size + carray constexpr get_sorted_buckets() const { + carray result{this->make_bucket_refs(std::make_index_sequence())}; + bits::quicksort(result.begin(), result.end() - 1, bucket_size_compare{}); + return result; + } +}; + +template +pmh_buckets constexpr make_pmh_buckets(const carray & items, + Hash const & hash, + Key const & key, + PRG & prg) { + using result_t = pmh_buckets; + // Continue until all items are placed without exceeding bucket_max + while (1) { + result_t result{}; + result.seed = prg(); + bool rejected = false; + for (std::size_t i = 0; i < items.size(); ++i) { + auto & bucket = result.buckets[hash(key(items[i]), static_cast(result.seed)) % M]; + if (bucket.size() >= result_t::bucket_max) { + rejected = true; + break; + } + bucket.push_back(i); + } + if (!rejected) { return result; } + } +} + +// Check if an item appears in a cvector +template +constexpr bool all_different_from(cvector & data, T & a) { + for (std::size_t i = 0; i < data.size(); ++i) + if (data[i] == a) + return false; + + return true; +} + +// Represents either an index to a data item array, or a seed to be used with +// a hasher. Seed must have high bit of 1, value has high bit of zero. +struct seed_or_index { + using value_type = std::uint64_t; + +private: + static constexpr value_type MINUS_ONE = std::numeric_limits::max(); + static constexpr value_type HIGH_BIT = ~(MINUS_ONE >> 1); + + value_type value_ = 0; + +public: + constexpr value_type value() const { return value_; } + constexpr bool is_seed() const { return value_ & HIGH_BIT; } + + constexpr seed_or_index(bool is_seed, value_type value) + : value_(is_seed ? (value | HIGH_BIT) : (value & ~HIGH_BIT)) {} + + constexpr seed_or_index() = default; + constexpr seed_or_index(const seed_or_index &) = default; + constexpr seed_or_index & operator =(const seed_or_index &) = default; +}; + +// Represents the perfect hash function created by pmh algorithm +template +struct pmh_tables : private Hasher { + std::uint64_t first_seed_; + carray first_table_; + carray second_table_; + + constexpr pmh_tables( + std::uint64_t first_seed, + carray first_table, + carray second_table, + Hasher hash) noexcept + : Hasher(hash) + , first_seed_(first_seed) + , first_table_(first_table) + , second_table_(second_table) + {} + + constexpr Hasher const& hash_function() const noexcept { + return static_cast(*this); + } + + template + constexpr std::size_t lookup(const KeyType & key) const { + return lookup(key, hash_function()); + } + + // Looks up a given key, to find its expected index in carray + // Always returns a valid index, must use KeyEqual test after to confirm. + template + constexpr std::size_t lookup(const KeyType & key, const HasherType& hasher) const { + auto const d = first_table_[hasher(key, static_cast(first_seed_)) % M]; + if (!d.is_seed()) { return static_cast(d.value()); } // this is narrowing std::uint64 -> std::size_t but should be fine + else { return second_table_[hasher(key, static_cast(d.value())) % M]; } + } +}; + +// Make pmh tables for given items, hash function, prg, etc. +template +pmh_tables constexpr make_pmh_tables(const carray & + items, + Hash const &hash, + Key const &key, + PRG prg) { + // Step 1: Place all of the keys into buckets + auto step_one = make_pmh_buckets(items, hash, key, prg); + + // Step 2: Sort the buckets to process the ones with the most items first. + auto buckets = step_one.get_sorted_buckets(); + + // Special value for unused slots. 
This is purposefully the index + // one-past-the-end of 'items' to function as a sentinel value. Both to avoid + // the need to apply the KeyEqual predicate and to be easily convertible to + // end(). + // Unused entries in both hash tables (G and H) have to contain this value. + const auto UNUSED = items.size(); + + // G becomes the first hash table in the resulting pmh function + carray G({false, UNUSED}); + + // H becomes the second hash table in the resulting pmh function + carray H(UNUSED); + + // Step 3: Map the items in buckets into hash tables. + for (const auto & bucket : buckets) { + auto const bsize = bucket.size(); + + if (bsize == 1) { + // Store index to the (single) item in G + // assert(bucket.hash == hash(key(items[bucket[0]]), step_one.seed) % M); + G[bucket.hash] = {false, static_cast(bucket[0])}; + } else if (bsize > 1) { + + // Repeatedly try different H of d until we find a hash function + // that places all items in the bucket into free slots + seed_or_index d{true, prg()}; + cvector bucket_slots; + + while (bucket_slots.size() < bsize) { + auto slot = hash(key(items[bucket[bucket_slots.size()]]), static_cast(d.value())) % M; + + if (H[slot] != UNUSED || !all_different_from(bucket_slots, slot)) { + bucket_slots.clear(); + d = {true, prg()}; + continue; + } + + bucket_slots.push_back(slot); + } + + // Put successful seed in G, and put indices to items in their slots + // assert(bucket.hash == hash(key(items[bucket[0]]), step_one.seed) % M); + G[bucket.hash] = d; + for (std::size_t i = 0; i < bsize; ++i) + H[bucket_slots[i]] = bucket[i]; + } + } + + return {step_one.seed, G, H, hash}; +} + +} // namespace bits + +} // namespace frozen + +#endif diff --git a/cpp_src/vendor/frozen/bits/version.h b/cpp_src/vendor/frozen/bits/version.h new file mode 100644 index 000000000..7e57d707e --- /dev/null +++ b/cpp_src/vendor/frozen/bits/version.h @@ -0,0 +1,30 @@ +/* + * Frozen + * Copyright 2016 QuarksLab + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +#ifndef FROZEN_LETITGO_VERSION_H +#define FROZEN_LETITGO_VERSION_H + +#define FROZEN_MAJOR_VERSION 1 +#define FROZEN_MINOR_VERSION 1 +#define FROZEN_PATCH_VERSION 1 + +#endif diff --git a/cpp_src/vendor/frozen/map.h b/cpp_src/vendor/frozen/map.h new file mode 100644 index 000000000..d54128a6c --- /dev/null +++ b/cpp_src/vendor/frozen/map.h @@ -0,0 +1,357 @@ +/* + * Frozen + * Copyright 2016 QuarksLab + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
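To summarize the two-level scheme that `pmh_tables::lookup` above implements: a first-table entry either resolves the key directly (bucket of size 1, high bit clear) or carries the seed for a second hash into H. A simplified, hypothetical standalone sketch, not frozen's real internals (the real code re-hashes with the raw `seed_or_index` value, high bit included, which is harmless modulo M):

```cpp
#include <cstddef>
#include <cstdint>

// High bit marks "this entry is a seed", exactly as in seed_or_index above.
constexpr std::uint64_t HIGH_BIT = ~(~std::uint64_t{0} >> 1);

template <std::size_t M, class Hasher>
std::size_t pmh_lookup_sketch(const char *key, const Hasher &hash,
                              std::uint64_t first_seed,
                              const std::uint64_t (&G)[M],   // seed-or-index table
                              const std::size_t (&H)[M]) {   // final index table
    const std::uint64_t d = G[hash(key, first_seed) % M];
    if (!(d & HIGH_BIT)) {
        return static_cast<std::size_t>(d);  // size-1 bucket: G holds the answer
    }
    return H[hash(key, d) % M];              // re-hash with the per-bucket seed
}

int main() {
    // Toy hasher, just to exercise the control flow.
    auto hash = [](const char *k, std::uint64_t seed) -> std::uint64_t {
        return seed + static_cast<unsigned char>(k[0]);
    };
    std::uint64_t G[4] = {2, HIGH_BIT | 7, 0, 1};
    std::size_t H[4] = {3, 0, 2, 1};
    return static_cast<int>(pmh_lookup_sketch("x", hash, 1, G, H));
}
```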
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +#ifndef FROZEN_LETITGO_MAP_H +#define FROZEN_LETITGO_MAP_H + +#include "frozen/bits/algorithms.h" +#include "frozen/bits/basic_types.h" +#include "frozen/bits/constexpr_assert.h" +#include "frozen/bits/exceptions.h" +#include "frozen/bits/mpl.h" +#include "frozen/bits/version.h" + +#include +#include + +namespace frozen { + +namespace impl { + +template class CompareKey : private Comparator { +public: + constexpr Comparator const& key_comp() const noexcept { + return static_cast(*this); + } + + constexpr CompareKey(Comparator const &comparator) + : Comparator(comparator) {} + + template + constexpr int operator()(std::pair const &self, + std::pair const &other) const { + return key_comp()(std::get<0>(self), std::get<0>(other)); + } + + template + constexpr int operator()(Key1 const &self_key, + std::pair const &other) const { + return key_comp()(self_key, std::get<0>(other)); + } + + template + constexpr int operator()(std::pair const &self, + Key2 const &other_key) const { + return key_comp()(std::get<0>(self), other_key); + } + + template + constexpr int operator()(Key1 const &self_key, Key2 const &other_key) const { + return key_comp()(self_key, other_key); + } +}; + +} // namespace impl + +template > +class map : private impl::CompareKey { + using container_type = bits::carray, N>; + container_type items_; + +public: + using key_type = Key; + using mapped_type = Value; + using value_type = typename container_type::value_type; + using size_type = typename container_type::size_type; + using difference_type = typename container_type::difference_type; + using key_compare = Compare; + using value_compare = impl::CompareKey; + using reference = typename container_type::reference; + using const_reference = typename container_type::const_reference; + using pointer = typename container_type::pointer; + using const_pointer = typename container_type::const_pointer; + using iterator = typename container_type::iterator; + using const_iterator = typename container_type::const_iterator; + using reverse_iterator = std::reverse_iterator; + using const_reverse_iterator = std::reverse_iterator; + +public: + /* constructors */ + constexpr map(container_type items, Compare const &compare) + : impl::CompareKey{compare} + , items_{bits::quicksort(bits::remove_cv_t(items), value_comp())} {} + + explicit constexpr map(container_type items) + : map{items, Compare{}} {} + + constexpr map(std::initializer_list items, Compare const &compare) + : map{container_type {items}, compare} { + constexpr_assert(items.size() == N, "Inconsistent initializer_list size and type size argument"); + } + + constexpr map(std::initializer_list items) + : map{items, Compare{}} {} + + /* element access */ + constexpr Value const& at(Key const &key) const { + return at_impl(*this, key); + } + constexpr Value& at(Key const &key) { + return at_impl(*this, key); + } + + /* iterators */ + constexpr iterator begin() { return items_.begin(); } + 
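A usage sketch for the sorted-array map being defined here; its lookups reduce to the constexpr binary searches from bits/algorithms.h:

```cpp
#include <frozen/map.h>

int main() {
    constexpr frozen::map<int, char, 3> m = {{1, 'a'}, {2, 'b'}, {4, 'c'}};
    static_assert(m.at(2) == 'b', "at() binary-searches the sorted backing array");
    static_assert(m.count(3) == 0, "");
    static_assert(m.find(4) != m.end(), "");
}
```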
constexpr const_iterator begin() const { return items_.begin(); } + constexpr const_iterator cbegin() const { return items_.begin(); } + constexpr iterator end() { return items_.end(); } + constexpr const_iterator end() const { return items_.end(); } + constexpr const_iterator cend() const { return items_.end(); } + + constexpr reverse_iterator rbegin() { return reverse_iterator{items_.end()}; } + constexpr const_reverse_iterator rbegin() const { return const_reverse_iterator{items_.end()}; } + constexpr const_reverse_iterator crbegin() const { return const_reverse_iterator{items_.end()}; } + constexpr reverse_iterator rend() { return reverse_iterator{items_.begin()}; } + constexpr const_reverse_iterator rend() const { return const_reverse_iterator{items_.begin()}; } + constexpr const_reverse_iterator crend() const { return const_reverse_iterator{items_.begin()}; } + + /* capacity */ + constexpr bool empty() const { return !N; } + constexpr size_type size() const { return N; } + constexpr size_type max_size() const { return N; } + + /* lookup */ + + template + constexpr std::size_t count(KeyType const &key) const { + return bits::binary_search(items_.begin(), key, value_comp()); + } + + template + constexpr const_iterator find(KeyType const &key) const { + return map::find_impl(*this, key); + } + template + constexpr iterator find(KeyType const &key) { + return map::find_impl(*this, key); + } + + template + constexpr bool contains(KeyType const &key) const { + return this->find(key) != this->end(); + } + + template + constexpr std::pair + equal_range(KeyType const &key) const { + return equal_range_impl(*this, key); + } + template + constexpr std::pair equal_range(KeyType const &key) { + return equal_range_impl(*this, key); + } + + template + constexpr const_iterator lower_bound(KeyType const &key) const { + return lower_bound_impl(*this, key); + } + template + constexpr iterator lower_bound(KeyType const &key) { + return lower_bound_impl(*this, key); + } + + template + constexpr const_iterator upper_bound(KeyType const &key) const { + return upper_bound_impl(*this, key); + } + template + constexpr iterator upper_bound(KeyType const &key) { + return upper_bound_impl(*this, key); + } + + /* observers */ + constexpr const key_compare& key_comp() const { return value_comp().key_comp(); } + constexpr const value_compare& value_comp() const { return static_cast const&>(*this); } + + private: + template + static inline constexpr auto& at_impl(This&& self, KeyType const &key) { + auto where = self.find(key); + if (where != self.end()) + return where->second; + else + FROZEN_THROW_OR_ABORT(std::out_of_range("unknown key")); + } + + template + static inline constexpr auto find_impl(This&& self, KeyType const &key) { + auto where = self.lower_bound(key); + if (where != self.end() && !self.value_comp()(key, *where)) + return where; + else + return self.end(); + } + + template + static inline constexpr auto equal_range_impl(This&& self, KeyType const &key) { + auto lower = self.lower_bound(key); + using lower_t = decltype(lower); + if (lower != self.end() && !self.value_comp()(key, *lower)) + return std::pair{lower, lower + 1}; + else + return std::pair{lower, lower}; + } + + template + static inline constexpr auto lower_bound_impl(This&& self, KeyType const &key) -> decltype(self.end()) { + return bits::lower_bound(self.items_.begin(), key, self.value_comp()); + } + + template + static inline constexpr auto upper_bound_impl(This&& self, KeyType const &key) { + auto lower = self.lower_bound(key); + if 
(lower != self.end() && !self.value_comp()(key, *lower)) + return lower + 1; + else + return lower; + } +}; + +template +class map : private impl::CompareKey { + using container_type = bits::carray, 0>; + +public: + using key_type = Key; + using mapped_type = Value; + using value_type = typename container_type::value_type; + using size_type = typename container_type::size_type; + using difference_type = typename container_type::difference_type; + using key_compare = Compare; + using value_compare = impl::CompareKey; + using reference = typename container_type::reference; + using const_reference = typename container_type::const_reference; + using pointer = typename container_type::pointer; + using const_pointer = typename container_type::const_pointer; + using iterator = pointer; + using const_iterator = const_pointer; + using reverse_iterator = pointer; + using const_reverse_iterator = const_pointer; + +public: + /* constructors */ + constexpr map(const map &other) = default; + constexpr map(std::initializer_list, Compare const &compare) + : impl::CompareKey{compare} {} + constexpr map(std::initializer_list items) + : map{items, Compare{}} {} + + /* element access */ + template + constexpr mapped_type at(KeyType const &) const { + FROZEN_THROW_OR_ABORT(std::out_of_range("invalid key")); + } + template + constexpr mapped_type at(KeyType const &) { + FROZEN_THROW_OR_ABORT(std::out_of_range("invalid key")); + } + + /* iterators */ + constexpr iterator begin() { return nullptr; } + constexpr const_iterator begin() const { return nullptr; } + constexpr const_iterator cbegin() const { return nullptr; } + constexpr iterator end() { return nullptr; } + constexpr const_iterator end() const { return nullptr; } + constexpr const_iterator cend() const { return nullptr; } + + constexpr reverse_iterator rbegin() { return nullptr; } + constexpr const_reverse_iterator rbegin() const { return nullptr; } + constexpr const_reverse_iterator crbegin() const { return nullptr; } + constexpr reverse_iterator rend() { return nullptr; } + constexpr const_reverse_iterator rend() const { return nullptr; } + constexpr const_reverse_iterator crend() const { return nullptr; } + + /* capacity */ + constexpr bool empty() const { return true; } + constexpr size_type size() const { return 0; } + constexpr size_type max_size() const { return 0; } + + /* lookup */ + + template + constexpr std::size_t count(KeyType const &) const { return 0; } + + template + constexpr const_iterator find(KeyType const &) const { return end(); } + template + constexpr iterator find(KeyType const &) { return end(); } + + template + constexpr std::pair + equal_range(KeyType const &) const { return {end(), end()}; } + template + constexpr std::pair + equal_range(KeyType const &) { return {end(), end()}; } + + template + constexpr const_iterator lower_bound(KeyType const &) const { return end(); } + template + constexpr iterator lower_bound(KeyType const &) { return end(); } + + template + constexpr const_iterator upper_bound(KeyType const &) const { return end(); } + template + constexpr iterator upper_bound(KeyType const &) { return end(); } + +/* observers */ + constexpr key_compare const& key_comp() const { return value_comp().key_comp(); } + constexpr value_compare const& value_comp() const { return static_cast const&>(*this); } +}; + +template > +constexpr auto make_map(bits::ignored_arg = {}/* for consistency with the initializer below for N = 0*/) { + return map{}; +} + +template +constexpr auto make_map(std::pair const (&items)[N]) { + 
return map{items}; +} + +template +constexpr auto make_map(std::array, N> const &items) { + return map{items}; +} + +template +constexpr auto make_map(std::pair const (&items)[N], Compare const& compare = Compare{}) { + return map{items, compare}; +} + +template +constexpr auto make_map(std::array, N> const &items, Compare const& compare = Compare{}) { + return map{items, compare}; +} + +} // namespace frozen + +#endif diff --git a/cpp_src/vendor/frozen/random.h b/cpp_src/vendor/frozen/random.h new file mode 100644 index 000000000..727133bb1 --- /dev/null +++ b/cpp_src/vendor/frozen/random.h @@ -0,0 +1,97 @@ +/* + * Frozen + * Copyright 2016 QuarksLab + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +#ifndef FROZEN_LETITGO_RANDOM_H +#define FROZEN_LETITGO_RANDOM_H + +#include "frozen/bits/algorithms.h" +#include "frozen/bits/version.h" + +#include +#include + +namespace frozen { +template +class linear_congruential_engine { + + static_assert(std::is_unsigned::value, + "UIntType must be an unsigned integral type"); + + template + static constexpr UIntType modulo(T val, std::integral_constant) { + return static_cast(val); + } + + template + static constexpr UIntType modulo(T val, std::integral_constant) { + // the static cast below may end up doing a truncation + return static_cast(val % M); + } + +public: + using result_type = UIntType; + static constexpr result_type multiplier = a; + static constexpr result_type increment = c; + static constexpr result_type modulus = m; + static constexpr result_type default_seed = 1u; + + linear_congruential_engine() = default; + constexpr linear_congruential_engine(result_type s) { seed(s); } + + void seed(result_type s = default_seed) { state_ = s; } + constexpr result_type operator()() { + using uint_least_t = bits::select_uint_least_t; + uint_least_t tmp = static_cast(multiplier) * state_ + increment; + + state_ = modulo(tmp, std::integral_constant()); + return state_; + } + constexpr void discard(unsigned long long n) { + while (n--) + operator()(); + } + static constexpr result_type min() { return increment == 0u ? 
1u : 0u; } + static constexpr result_type max() { return modulus - 1u; } + friend constexpr bool operator==(linear_congruential_engine const &self, + linear_congruential_engine const &other) { + return self.state_ == other.state_; + } + friend constexpr bool operator!=(linear_congruential_engine const &self, + linear_congruential_engine const &other) { + return !(self == other); + } + +private: + result_type state_ = default_seed; +}; + +using minstd_rand0 = + linear_congruential_engine; +using minstd_rand = + linear_congruential_engine; + +// This generator is used by default in unordered frozen containers +using default_prg_t = minstd_rand; + +} // namespace frozen + +#endif diff --git a/cpp_src/vendor/frozen/set.h b/cpp_src/vendor/frozen/set.h new file mode 100644 index 000000000..430d4a54c --- /dev/null +++ b/cpp_src/vendor/frozen/set.h @@ -0,0 +1,260 @@ +/* + * Frozen + * Copyright 2016 QuarksLab + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +#ifndef FROZEN_SET_H +#define FROZEN_SET_H + +#include "frozen/bits/algorithms.h" +#include "frozen/bits/basic_types.h" +#include "frozen/bits/constexpr_assert.h" +#include "frozen/bits/version.h" +#include "frozen/bits/defines.h" + +#include +#include + +namespace frozen { + +template > class set : private Compare { + using container_type = bits::carray; + container_type keys_; + +public: + /* container typedefs*/ + using key_type = Key; + using value_type = Key; + using size_type = typename container_type::size_type; + using difference_type = typename container_type::size_type; + using key_compare = Compare; + using value_compare = Compare; + using reference = typename container_type::const_reference; + using const_reference = reference; + using pointer = typename container_type::const_pointer; + using const_pointer = pointer; + using iterator = typename container_type::const_iterator; + using reverse_iterator = std::reverse_iterator; + using const_iterator = iterator; + using const_reverse_iterator = std::reverse_iterator; + +public: + /* constructors */ + constexpr set(const set &other) = default; + + constexpr set(container_type keys, Compare const & comp) + : Compare{comp} + , keys_(bits::quicksort(keys, value_comp())) { + } + + explicit constexpr set(container_type keys) + : set{keys, Compare{}} {} + + constexpr set(std::initializer_list keys, Compare const & comp) + : set{container_type{keys}, comp} { + constexpr_assert(keys.size() == N, "Inconsistent initializer_list size and type size argument"); + } + + constexpr set(std::initializer_list keys) + : set{keys, Compare{}} {} + + constexpr set& operator=(const set &other) = default; + + /* capacity */ + constexpr bool empty() const { return !N; } + constexpr size_type size() const { return N; } + constexpr size_type max_size() 
const { return N; } + + /* lookup */ + template + constexpr std::size_t count(KeyType const &key) const { + return bits::binary_search(keys_.begin(), key, value_comp()); + } + + template + constexpr const_iterator find(KeyType const &key) const { + const_iterator where = lower_bound(key); + if ((where != end()) && !value_comp()(key, *where)) + return where; + else + return end(); + } + + template + constexpr bool contains(KeyType const &key) const { + return this->find(key) != keys_.end(); + } + + template + constexpr std::pair equal_range(KeyType const &key) const { + auto const lower = lower_bound(key); + if (lower == end()) + return {lower, lower}; + else + return {lower, lower + 1}; + } + + template + constexpr const_iterator lower_bound(KeyType const &key) const { + auto const where = bits::lower_bound(keys_.begin(), key, value_comp()); + if ((where != end()) && !value_comp()(key, *where)) + return where; + else + return end(); + } + + template + constexpr const_iterator upper_bound(KeyType const &key) const { + auto const where = bits::lower_bound(keys_.begin(), key, value_comp()); + if ((where != end()) && !value_comp()(key, *where)) + return where + 1; + else + return end(); + } + + /* observers */ + constexpr const key_compare& key_comp() const { return value_comp(); } + constexpr const key_compare& value_comp() const { return static_cast(*this); } + + /* iterators */ + constexpr const_iterator begin() const { return keys_.begin(); } + constexpr const_iterator cbegin() const { return keys_.begin(); } + constexpr const_iterator end() const { return keys_.end(); } + constexpr const_iterator cend() const { return keys_.end(); } + + constexpr const_reverse_iterator rbegin() const { return const_reverse_iterator{keys_.end()}; } + constexpr const_reverse_iterator crbegin() const { return const_reverse_iterator{keys_.end()}; } + constexpr const_reverse_iterator rend() const { return const_reverse_iterator{keys_.begin()}; } + constexpr const_reverse_iterator crend() const { return const_reverse_iterator{keys_.begin()}; } + + /* comparison */ + constexpr bool operator==(set const& rhs) const { return bits::equal(begin(), end(), rhs.begin()); } + constexpr bool operator!=(set const& rhs) const { return !(*this == rhs); } + constexpr bool operator<(set const& rhs) const { return bits::lexicographical_compare(begin(), end(), rhs.begin(), rhs.end()); } + constexpr bool operator<=(set const& rhs) const { return (*this < rhs) || (*this == rhs); } + constexpr bool operator>(set const& rhs) const { return bits::lexicographical_compare(rhs.begin(), rhs.end(), begin(), end()); } + constexpr bool operator>=(set const& rhs) const { return (*this > rhs) || (*this == rhs); } +}; + +template class set : private Compare { + using container_type = bits::carray; // just for the type definitions + +public: + /* container typedefs*/ + using key_type = Key; + using value_type = Key; + using size_type = typename container_type::size_type; + using difference_type = typename container_type::size_type; + using key_compare = Compare; + using value_compare = Compare; + using reference = typename container_type::const_reference; + using const_reference = reference; + using pointer = typename container_type::const_pointer; + using const_pointer = pointer; + using iterator = pointer; + using reverse_iterator = pointer; + using const_iterator = const_pointer; + using const_reverse_iterator = const_pointer; + +public: + /* constructors */ + constexpr set(const set &other) = default; + constexpr set(bits::carray, Compare 
const &) {} + explicit constexpr set(bits::carray) {} + + constexpr set(std::initializer_list, Compare const &comp) + : Compare{comp} {} + constexpr set(std::initializer_list keys) : set{keys, Compare{}} {} + + constexpr set& operator=(const set &other) = default; + + /* capacity */ + constexpr bool empty() const { return true; } + constexpr size_type size() const { return 0; } + constexpr size_type max_size() const { return 0; } + + /* lookup */ + template + constexpr std::size_t count(KeyType const &) const { return 0; } + + template + constexpr const_iterator find(KeyType const &) const { return end(); } + + template + constexpr std::pair + equal_range(KeyType const &) const { return {end(), end()}; } + + template + constexpr const_iterator lower_bound(KeyType const &) const { return end(); } + + template + constexpr const_iterator upper_bound(KeyType const &) const { return end(); } + + /* observers */ + constexpr const key_compare& key_comp() const { return value_comp(); } + constexpr const key_compare& value_comp() const { return static_cast(*this); } + + /* iterators */ + constexpr const_iterator begin() const { return nullptr; } + constexpr const_iterator cbegin() const { return nullptr; } + constexpr const_iterator end() const { return nullptr; } + constexpr const_iterator cend() const { return nullptr; } + + constexpr const_reverse_iterator rbegin() const { return nullptr; } + constexpr const_reverse_iterator crbegin() const { return nullptr; } + constexpr const_reverse_iterator rend() const { return nullptr; } + constexpr const_reverse_iterator crend() const { return nullptr; } +}; + +template +constexpr auto make_set(bits::ignored_arg = {}/* for consistency with the initializer below for N = 0*/) { + return set{}; +} + +template +constexpr auto make_set(const T (&args)[N]) { + return set(args); +} + +template +constexpr auto make_set(std::array const &args) { + return set(args); +} + +template +constexpr auto make_set(const T (&args)[N], Compare const& compare = Compare{}) { + return set(args, compare); +} + +template +constexpr auto make_set(std::array const &args, Compare const& compare = Compare{}) { + return set(args, compare); +} + +#ifdef FROZEN_LETITGO_HAS_DEDUCTION_GUIDES + +template +set(T, Args...) -> set; + +#endif // FROZEN_LETITGO_HAS_DEDUCTION_GUIDES + +} // namespace frozen + +#endif diff --git a/cpp_src/vendor/frozen/string.h b/cpp_src/vendor/frozen/string.h new file mode 100644 index 000000000..354ed9c15 --- /dev/null +++ b/cpp_src/vendor/frozen/string.h @@ -0,0 +1,152 @@ +/* + * Frozen + * Copyright 2016 QuarksLab + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +#ifndef FROZEN_LETITGO_STRING_H +#define FROZEN_LETITGO_STRING_H + +#include "frozen/bits/elsa.h" +#include "frozen/bits/hash_string.h" +#include "frozen/bits/version.h" +#include "frozen/bits/defines.h" + +#include +#include + +#ifdef FROZEN_LETITGO_HAS_STRING_VIEW +#include +#endif + +namespace frozen { + +template +class basic_string { + using chr_t = _CharT; + + chr_t const *data_; + std::size_t size_; + +public: + template + constexpr basic_string(chr_t const (&data)[N]) + : data_(data), size_(N - 1) {} + constexpr basic_string(chr_t const *data, std::size_t size) + : data_(data), size_(size) {} + +#ifdef FROZEN_LETITGO_HAS_STRING_VIEW + constexpr basic_string(std::basic_string_view data) + : data_(data.data()), size_(data.size()) {} +#endif + + constexpr basic_string(const basic_string &) noexcept = default; + constexpr basic_string &operator=(const basic_string &) noexcept = default; + + constexpr std::size_t size() const { return size_; } + + constexpr chr_t operator[](std::size_t i) const { return data_[i]; } + + constexpr bool operator==(basic_string other) const { + if (size_ != other.size_) + return false; + for (std::size_t i = 0; i < size_; ++i) + if (data_[i] != other.data_[i]) + return false; + return true; + } + + constexpr bool operator<(const basic_string &other) const { + unsigned i = 0; + while (i < size() && i < other.size()) { + if ((*this)[i] < other[i]) { + return true; + } + if ((*this)[i] > other[i]) { + return false; + } + ++i; + } + return size() < other.size(); + } + + friend constexpr bool operator>(const basic_string& lhs, const basic_string& rhs) { + return rhs < lhs; + } + + constexpr const chr_t *data() const { return data_; } + constexpr const chr_t *begin() const { return data(); } + constexpr const chr_t *end() const { return data() + size(); } +}; + +template struct elsa> { + constexpr std::size_t operator()(basic_string<_CharT> value) const { + return hash_string(value); + } + constexpr std::size_t operator()(basic_string<_CharT> value, std::size_t seed) const { + return hash_string(value, seed); + } +}; + +using string = basic_string; +using wstring = basic_string; +using u16string = basic_string; +using u32string = basic_string; + +#ifdef FROZEN_LETITGO_HAS_CHAR8T +using u8string = basic_string; +#endif + +namespace string_literals { + +constexpr string operator"" _s(const char *data, std::size_t size) { + return {data, size}; +} + +constexpr wstring operator"" _s(const wchar_t *data, std::size_t size) { + return {data, size}; +} + +constexpr u16string operator"" _s(const char16_t *data, std::size_t size) { + return {data, size}; +} + +constexpr u32string operator"" _s(const char32_t *data, std::size_t size) { + return {data, size}; +} + +#ifdef FROZEN_LETITGO_HAS_CHAR8T +constexpr u8string operator"" _s(const char8_t *data, std::size_t size) { + return {data, size}; +} +#endif + +} // namespace string_literals + +} // namespace frozen + +namespace std { +template struct hash> { + std::size_t operator()(frozen::basic_string<_CharT> s) const { + return frozen::elsa>{}(s); + } +}; +} // namespace std + +#endif diff --git a/cpp_src/vendor/frozen/unordered_map.h b/cpp_src/vendor/frozen/unordered_map.h new file mode 100644 index 000000000..6f7b4a009 --- /dev/null +++ b/cpp_src/vendor/frozen/unordered_map.h @@ -0,0 +1,217 @@ +/* + * Frozen + * Copyright 2016 QuarksLab + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +#ifndef FROZEN_LETITGO_UNORDERED_MAP_H +#define FROZEN_LETITGO_UNORDERED_MAP_H + +#include "frozen/bits/basic_types.h" +#include "frozen/bits/constexpr_assert.h" +#include "frozen/bits/elsa.h" +#include "frozen/bits/exceptions.h" +#include "frozen/bits/pmh.h" +#include "frozen/bits/version.h" +#include "frozen/random.h" + +#include +#include +#include + +namespace frozen { + +namespace bits { + +struct GetKey { + template constexpr auto const &operator()(KV const &kv) const { + return kv.first; + } +}; + +} // namespace bits + +template , + class KeyEqual = std::equal_to> +class unordered_map : private KeyEqual { + static constexpr std::size_t storage_size = + bits::next_highest_power_of_two(N) * (N < 32 ? 2 : 1); // size adjustment to prevent high collision rate for small sets + using container_type = bits::carray, N>; + using tables_type = bits::pmh_tables; + + container_type items_; + tables_type tables_; + +public: + /* typedefs */ + using Self = unordered_map; + using key_type = Key; + using mapped_type = Value; + using value_type = typename container_type::value_type; + using size_type = typename container_type::size_type; + using difference_type = typename container_type::difference_type; + using hasher = Hash; + using key_equal = KeyEqual; + using reference = typename container_type::reference; + using const_reference = typename container_type::const_reference; + using pointer = typename container_type::pointer; + using const_pointer = typename container_type::const_pointer; + using iterator = typename container_type::iterator; + using const_iterator = typename container_type::const_iterator; + +public: + /* constructors */ + unordered_map(unordered_map const &) = default; + constexpr unordered_map(container_type items, + Hash const &hash, KeyEqual const &equal) + : KeyEqual{equal} + , items_{items} + , tables_{ + bits::make_pmh_tables( + items_, hash, bits::GetKey{}, default_prg_t{})} {} + explicit constexpr unordered_map(container_type items) + : unordered_map{items, Hash{}, KeyEqual{}} {} + + constexpr unordered_map(std::initializer_list items, + Hash const & hash, KeyEqual const & equal) + : unordered_map{container_type{items}, hash, equal} { + constexpr_assert(items.size() == N, "Inconsistent initializer_list size and type size argument"); + } + + constexpr unordered_map(std::initializer_list items) + : unordered_map{items, Hash{}, KeyEqual{}} {} + + /* iterators */ + constexpr iterator begin() { return items_.begin(); } + constexpr iterator end() { return items_.end(); } + constexpr const_iterator begin() const { return items_.begin(); } + constexpr const_iterator end() const { return items_.end(); } + constexpr const_iterator cbegin() const { return items_.begin(); } + constexpr const_iterator cend() const { return items_.end(); } + + /* capacity */ + constexpr bool empty() 
const { return !N; } + constexpr size_type size() const { return N; } + constexpr size_type max_size() const { return N; } + + /* lookup */ + template + constexpr std::size_t count(KeyType const &key) const { + return find(key) != end(); + } + + template + constexpr Value const &at(KeyType const &key) const { + return at_impl(*this, key); + } + template + constexpr Value &at(KeyType const &key) { + return at_impl(*this, key); + } + + template + constexpr const_iterator find(KeyType const &key) const { + return find_impl(*this, key, hash_function(), key_eq()); + } + template + constexpr iterator find(KeyType const &key) { + return find_impl(*this, key, hash_function(), key_eq()); + } + + template + constexpr bool contains(KeyType const &key) const { + return this->find(key) != this->end(); + } + + template + constexpr std::pair equal_range(KeyType const &key) const { + return equal_range_impl(*this, key); + } + template + constexpr std::pair equal_range(KeyType const &key) { + return equal_range_impl(*this, key); + } + + /* bucket interface */ + constexpr std::size_t bucket_count() const { return storage_size; } + constexpr std::size_t max_bucket_count() const { return storage_size; } + + /* observers*/ + constexpr const hasher& hash_function() const { return tables_.hash_function(); } + constexpr const key_equal& key_eq() const { return static_cast(*this); } + +private: + template + static inline constexpr auto& at_impl(This&& self, KeyType const &key) { + auto it = self.find(key); + if (it != self.end()) + return it->second; + else + FROZEN_THROW_OR_ABORT(std::out_of_range("unknown key")); + } + + template + static inline constexpr auto find_impl(This&& self, KeyType const &key, Hasher const &hash, Equal const &equal) { + auto const pos = self.tables_.lookup(key, hash); + auto it = self.items_.begin() + pos; + if (it != self.items_.end() && equal(it->first, key)) + return it; + else + return self.items_.end(); + } + + template + static inline constexpr auto equal_range_impl(This&& self, KeyType const &key) { + auto const it = self.find(key); + if (it != self.end()) + return std::make_pair(it, it + 1); + else + return std::make_pair(self.end(), self.end()); + } +}; + +template +constexpr auto make_unordered_map(std::pair const (&items)[N]) { + return unordered_map{items}; +} + +template +constexpr auto make_unordered_map( + std::pair const (&items)[N], + Hasher const &hash = elsa{}, + Equal const &equal = std::equal_to{}) { + return unordered_map{items, hash, equal}; +} + +template +constexpr auto make_unordered_map(std::array, N> const &items) { + return unordered_map{items}; +} + +template +constexpr auto make_unordered_map( + std::array, N> const &items, + Hasher const &hash = elsa{}, + Equal const &equal = std::equal_to{}) { + return unordered_map{items, hash, equal}; +} + +} // namespace frozen + +#endif diff --git a/cpp_src/vendor/frozen/unordered_set.h b/cpp_src/vendor/frozen/unordered_set.h new file mode 100644 index 000000000..81bca6c5f --- /dev/null +++ b/cpp_src/vendor/frozen/unordered_set.h @@ -0,0 +1,181 @@ +/* + * Frozen + * Copyright 2016 QuarksLab + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +#ifndef FROZEN_LETITGO_UNORDERED_SET_H +#define FROZEN_LETITGO_UNORDERED_SET_H + +#include "frozen/bits/basic_types.h" +#include "frozen/bits/constexpr_assert.h" +#include "frozen/bits/elsa.h" +#include "frozen/bits/pmh.h" +#include "frozen/bits/version.h" +#include "frozen/random.h" + +#include + +namespace frozen { + +namespace bits { + +struct Get { + template constexpr T const &operator()(T const &key) const { + return key; + } +}; + +} // namespace bits + +template , + class KeyEqual = std::equal_to> +class unordered_set : private KeyEqual { + static constexpr std::size_t storage_size = + bits::next_highest_power_of_two(N) * (N < 32 ? 2 : 1); // size adjustment to prevent high collision rate for small sets + using container_type = bits::carray; + using tables_type = bits::pmh_tables; + + container_type keys_; + tables_type tables_; + +public: + /* typedefs */ + using key_type = Key; + using value_type = Key; + using size_type = typename container_type::size_type; + using difference_type = typename container_type::difference_type; + using hasher = Hash; + using key_equal = KeyEqual; + using const_reference = typename container_type::const_reference; + using reference = const_reference; + using const_pointer = typename container_type::const_pointer; + using pointer = const_pointer; + using const_iterator = typename container_type::const_iterator; + using iterator = const_iterator; + +public: + /* constructors */ + unordered_set(unordered_set const &) = default; + constexpr unordered_set(container_type keys, Hash const &hash, + KeyEqual const &equal) + : KeyEqual{equal} + , keys_{keys} + , tables_{bits::make_pmh_tables( + keys_, hash, bits::Get{}, default_prg_t{})} {} + explicit constexpr unordered_set(container_type keys) + : unordered_set{keys, Hash{}, KeyEqual{}} {} + + constexpr unordered_set(std::initializer_list keys) + : unordered_set{keys, Hash{}, KeyEqual{}} {} + + constexpr unordered_set(std::initializer_list keys, Hash const & hash, KeyEqual const & equal) + : unordered_set{container_type{keys}, hash, equal} { + constexpr_assert(keys.size() == N, "Inconsistent initializer_list size and type size argument"); + } + + /* iterators */ + constexpr const_iterator begin() const { return keys_.begin(); } + constexpr const_iterator end() const { return keys_.end(); } + constexpr const_iterator cbegin() const { return keys_.begin(); } + constexpr const_iterator cend() const { return keys_.end(); } + + /* capacity */ + constexpr bool empty() const { return !N; } + constexpr size_type size() const { return N; } + constexpr size_type max_size() const { return N; } + + /* lookup */ + template + constexpr std::size_t count(KeyType const &key) const { + return find(key, hash_function(), key_eq()) != end(); + } + + template + constexpr const_iterator find(KeyType const &key, Hasher const &hash, Equal const &equal) const { + auto const pos = tables_.lookup(key, hash); + auto it = keys_.begin() + pos; + if (it != keys_.end() && equal(*it, key)) + return it; + else + return keys_.end(); + } + template + constexpr const_iterator find(KeyType const &key) const { + auto 
const pos = tables_.lookup(key, hash_function()); + auto it = keys_.begin() + pos; + if (it != keys_.end() && key_eq()(*it, key)) + return it; + else + return keys_.end(); + } + + template + constexpr bool contains(KeyType const &key) const { + return this->find(key) != keys_.end(); + } + + template + constexpr std::pair equal_range(KeyType const &key) const { + auto const it = find(key); + if (it != end()) + return {it, it + 1}; + else + return {keys_.end(), keys_.end()}; + } + + /* bucket interface */ + constexpr std::size_t bucket_count() const { return storage_size; } + constexpr std::size_t max_bucket_count() const { return storage_size; } + + /* observers*/ + constexpr const hasher& hash_function() const { return tables_.hash_function(); } + constexpr const key_equal& key_eq() const { return static_cast(*this); } +}; + +template +constexpr auto make_unordered_set(T const (&keys)[N]) { + return unordered_set{keys}; +} + +template +constexpr auto make_unordered_set(T const (&keys)[N], Hasher const& hash, Equal const& equal) { + return unordered_set{keys, hash, equal}; +} + +template +constexpr auto make_unordered_set(std::array const &keys) { + return unordered_set{keys}; +} + +template +constexpr auto make_unordered_set(std::array const &keys, Hasher const& hash, Equal const& equal) { + return unordered_set{keys, hash, equal}; +} + +#ifdef FROZEN_LETITGO_HAS_DEDUCTION_GUIDES + +template +unordered_set(T, Args...) -> unordered_set; + +#endif + +} // namespace frozen + +#endif diff --git a/cpp_src/vendor/gason/gason.cc b/cpp_src/vendor/gason/gason.cc index 5dc549da9..e13c27768 100644 --- a/cpp_src/vendor/gason/gason.cc +++ b/cpp_src/vendor/gason/gason.cc @@ -351,7 +351,8 @@ const JsonNode &JsonNode::operator[](std::string_view key) const { } for (auto &v : (*this)) if (std::string_view(v.key) == key) return v; - static JsonNode empty_node{{JsonTag(JSON_EMPTY)}, nullptr, {}}; + // TODO: Remove NOLINT after pyreindexer update. Issue #1736 + static JsonNode empty_node{{JsonTag(JSON_EMPTY)}, nullptr, {}}; // NOLINT(*EnumCastOutOfRange) return empty_node; } diff --git a/cpp_src/vendor/gason/gason.h b/cpp_src/vendor/gason/gason.h index 3d5b58e35..906634e67 100644 --- a/cpp_src/vendor/gason/gason.h +++ b/cpp_src/vendor/gason/gason.h @@ -26,6 +26,8 @@ enum JsonTag : uint8_t { JSON_NULL = 0xF, }; +// TODO: Move this to the JsonTag-enum, when pyreindexer deploy will be fixed. Currently this would break old pyrx builds +// Issue #1736 constexpr uint8_t JSON_EMPTY = 0xFF; struct JsonNode; @@ -75,7 +77,8 @@ union JsonValue { u.tag = tag; ival = uintptr_t(payload); } - JsonTag getTag() const noexcept { return JsonTag(u.tag); } + // TODO: Remove NOLINT after pyreindexer update. 
Issue #1736 + JsonTag getTag() const noexcept { return JsonTag(u.tag); } // NOLINT(*EnumCastOutOfRange) int64_t toNumber() const { assertrx(getTag() == JSON_NUMBER || getTag() == JSON_DOUBLE); @@ -155,7 +158,7 @@ struct JsonNode { } const JsonNode &operator[](std::string_view sv) const; - bool empty() const noexcept { return value.getTag() == JsonTag(JSON_EMPTY); } + bool empty() const noexcept { return uint8_t(value.getTag()) == JSON_EMPTY; } bool isObject() const noexcept { return value.getTag() == JSON_OBJECT; } JsonNode *toNode() const; }; diff --git a/cpp_src/vendor/libstemmer/runtime/api.c b/cpp_src/vendor/libstemmer/runtime/api.c index 40039ef4a..e348f06e6 100644 --- a/cpp_src/vendor/libstemmer/runtime/api.c +++ b/cpp_src/vendor/libstemmer/runtime/api.c @@ -49,7 +49,7 @@ extern void SN_close_env(struct SN_env * z, int S_size) { lose_s(z->S[i]); } - free(z->S); + free(z->S); // NOLINT(bugprone-multi-level-implicit-pointer-conversion) } free(z->I); free(z->B); diff --git a/cpp_src/vendor/spdlog/details/registry.h b/cpp_src/vendor/spdlog/details/registry.h index 94b2672ea..dd9fea848 100644 --- a/cpp_src/vendor/spdlog/details/registry.h +++ b/cpp_src/vendor/spdlog/details/registry.h @@ -30,7 +30,7 @@ template class registry_t { public: - registry_t(const registry_t&) = delete; + registry_t(const registry_t&) = delete; registry_t& operator=(const registry_t&) = delete; void register_logger(std::shared_ptr logger) @@ -197,7 +197,7 @@ class registry_t } private: - registry_t() = default; + registry_t() = default; void throw_if_exists(const std::string &logger_name) { diff --git a/cpp_src/vendor/spdlog/fmt/bundled/format.cc b/cpp_src/vendor/spdlog/fmt/bundled/format.cc index be14d3263..452cb5507 100644 --- a/cpp_src/vendor/spdlog/fmt/bundled/format.cc +++ b/cpp_src/vendor/spdlog/fmt/bundled/format.cc @@ -80,7 +80,6 @@ static inline fmt::internal::Null<> strerror_s(char *, std::size_t, ...) { retur namespace fmt { FMT_FUNC internal::RuntimeError::~RuntimeError() FMT_DTOR_NOEXCEPT {} -FMT_FUNC FormatError::~FormatError() FMT_DTOR_NOEXCEPT {} FMT_FUNC SystemError::~SystemError() FMT_DTOR_NOEXCEPT {} namespace { @@ -253,14 +252,6 @@ const uint64_t internal::BasicData::POWERS_OF_10_64[] = {0, FMT_POWERS_OF_10( // to avoid warnings about C++98 not supporting long long. 
ULongLong(1000000000) * ULongLong(1000000000) * 10}; -FMT_FUNC void internal::report_unknown_type(char code, const char *type) { - (void)type; - if (std::isprint(static_cast(code))) { - FMT_THROW(FormatError(format("unknown format code '{}' for {}", code, type))); - } - FMT_THROW(FormatError(format("unknown format code '\\x{:02x}' for {}", static_cast(code), type))); -} - #if FMT_USE_WINDOWS_H FMT_FUNC internal::UTF8ToUTF16::UTF8ToUTF16(StringRef s) { diff --git a/cpp_src/vendor/spdlog/fmt/bundled/format.h b/cpp_src/vendor/spdlog/fmt/bundled/format.h index 0efdd6c3d..f4959cf00 100644 --- a/cpp_src/vendor/spdlog/fmt/bundled/format.h +++ b/cpp_src/vendor/spdlog/fmt/bundled/format.h @@ -170,13 +170,7 @@ typedef __int64 intmax_t; #endif // Use the compiler's attribute noreturn -#if defined(__MINGW32__) || defined(__MINGW64__) -#define FMT_NORETURN __attribute__((noreturn)) -#elif FMT_HAS_CPP_ATTRIBUTE(noreturn) && __cplusplus >= 201103L #define FMT_NORETURN [[noreturn]] -#else -#define FMT_NORETURN -#endif #ifndef FMT_USE_VARIADIC_TEMPLATES // Variadic templates are available in GCC since version 4.4 @@ -646,7 +640,7 @@ class FormatError : public std::runtime_error { public: explicit FormatError(CStringRef message) : std::runtime_error(message.c_str()) {} FormatError(const FormatError &ferr) : std::runtime_error(ferr) {} - FMT_API ~FormatError() FMT_DTOR_NOEXCEPT FMT_OVERRIDE; + ~FormatError() FMT_DTOR_NOEXCEPT FMT_OVERRIDE {} }; namespace internal { @@ -951,7 +945,7 @@ struct IntTraits { typedef typename TypeSelector::digits <= 32>::Type MainType; }; -FMT_API FMT_NORETURN void report_unknown_type(char code, const char *type); +FMT_NORETURN static void report_unknown_type(char code, const char *type); // Static data is placed in this class template to allow header-only // configuration. 
@@ -3885,6 +3879,20 @@ void format_arg(fmt::BasicFormatter &f, const Char *&format_ } format_str = end + 1; } + +namespace internal { + +// This function was moved to the h-file to avoid GCC 12 LTO build error +FMT_NORETURN static void report_unknown_type(char code, const char *type) { + (void)type; + if (std::isprint(static_cast(code))) { + FMT_THROW(FormatError(format("unknown format code '{}' for {}", code, type))); + } + FMT_THROW(FormatError(format("unknown format code '\\x{:02x}' for {}", static_cast(code), type))); +} + +} // namespace internal + } // namespace fmt #if FMT_USE_USER_DEFINED_LITERALS @@ -3926,7 +3934,7 @@ inline namespace literals { \endrst */ inline internal::UdlFormat operator"" _format(const char *s, std::size_t) { return {s}; } -inline internal::UdlFormat operator"" _format(const wchar_t *s, std::size_t) { return {s}; } +inline internal::UdlFormat operator"" _format(const wchar_t * s, std::size_t) { return {s}; } /** \rst @@ -3939,7 +3947,7 @@ inline internal::UdlFormat operator"" _format(const wchar_t *s, std::si \endrst */ inline internal::UdlArg operator"" _a(const char *s, std::size_t) { return {s}; } -inline internal::UdlArg operator"" _a(const wchar_t *s, std::size_t) { return {s}; } +inline internal::UdlArg operator"" _a(const wchar_t * s, std::size_t) { return {s}; } } // namespace literals } // namespace fmt diff --git a/cpp_src/vendor/yaml-cpp/exp.cpp b/cpp_src/vendor/yaml-cpp/exp.cpp index 814e68894..f238b7998 100644 --- a/cpp_src/vendor/yaml-cpp/exp.cpp +++ b/cpp_src/vendor/yaml-cpp/exp.cpp @@ -124,7 +124,6 @@ std::string Escape(Stream& in) { default:; } - std::stringstream msg; throw ParserException(in.mark(), std::string(ErrorMsg::INVALID_ESCAPE) + ch); } } // namespace Exp diff --git a/cpp_src/vendor/yaml-cpp/scantoken.cpp b/cpp_src/vendor/yaml-cpp/scantoken.cpp index 8d10ec24c..77e4a3f66 100644 --- a/cpp_src/vendor/yaml-cpp/scantoken.cpp +++ b/cpp_src/vendor/yaml-cpp/scantoken.cpp @@ -18,9 +18,6 @@ namespace YAML { // Directive // . Note: no semantic checking is done here (that's for the parser to do) void Scanner::ScanDirective() { - std::string name; - std::vector params; - // pop indents and simple keys PopAllIndents(); PopAllSimpleKeys(); diff --git a/query.go b/query.go index 7a001df5d..be5643f4c 100644 --- a/query.go +++ b/query.go @@ -22,7 +22,7 @@ type QueryStrictMode int const ( queryStrictModeNotSet QueryStrictMode = bindings.QueryStrictModeNotSet - QueryStrictModeNone = bindings.QueryStrictModeNone // Allows any fields in coditions, but doesn't check actual values for non-existing names + QueryStrictModeNone = bindings.QueryStrictModeNone // Allows any fields in conditions, but doesn't check actual values for non-existing names QueryStrictModeNames = bindings.QueryStrictModeNames // Allows only valid fields and indexes in conditions. Otherwise query will return error QueryStrictModeIndexes = bindings.QueryStrictModeIndexes // Allows only indexes in conditions. Otherwise query will return error ) @@ -227,7 +227,7 @@ func (q *Query) makeCopy(db *reindexerImpl, root *Query) *Query { qC.joinToFields = append(q.joinToFields[:0:0], q.joinToFields...) qC.joinHandlers = append(q.joinHandlers[:0:0], q.joinHandlers...) - //TODO not realycopy + //TODO not real copy qC.context = q.context qC.joinType = q.joinType qC.nsArray = append(q.nsArray[:0:0], q.nsArray...) 
@@ -632,7 +632,7 @@ func (q *Query) Sort(sortIndex string, desc bool, values ...interface{}) *Query return q } -// SortStDistance - wrapper for geometry sorting by shortes distance between geometry field and point (ST_Distance) +// SortStDistance - wrapper for geometry sorting by shortest distance between geometry field and point (ST_Distance) func (q *Query) SortStPointDistance(field string, p Point, desc bool) *Query { var sb strings.Builder sb.Grow(256) @@ -646,7 +646,7 @@ func (q *Query) SortStPointDistance(field string, p Point, desc bool) *Query { return q.Sort(sb.String(), desc) } -// SortStDistance - wrapper for geometry sorting by shortes distance between 2 geometry fields (ST_Distance) +// SortStDistance - wrapper for geometry sorting by shortest distance between 2 geometry fields (ST_Distance) func (q *Query) SortStFieldDistance(field1 string, field2 string, desc bool) *Query { var sb strings.Builder sb.Grow(256) @@ -659,7 +659,7 @@ func (q *Query) SortStFieldDistance(field1 string, field2 string, desc bool) *Qu } // AND - next condition will added with AND -// This is the default operation for WHERE statement. Do not have to be called explicitly in user's code. Used in DSL convertion +// This is the default operation for WHERE statement. Do not have to be called explicitly in user's code. Used in DSL conversion func (q *Query) And() *Query { q.nextOp = opAND return q @@ -851,13 +851,13 @@ func (q *Query) panicTrace(msg string) { } // Delete will execute query, and delete items, matches query -// On sucess return number of deleted elements +// On success return number of deleted elements func (q *Query) Delete() (int, error) { return q.DeleteCtx(context.Background()) } // DeleteCtx will execute query, and delete items, matches query -// On sucess return number of deleted elements +// On success return number of deleted elements func (q *Query) DeleteCtx(ctx context.Context) (int, error) { if q.root != nil || len(q.joinQueries) != 0 { return 0, errors.New("Delete does not support joined queries") @@ -1002,19 +1002,19 @@ func (q *Query) SetExpression(field string, value string) *Query { } // Update will execute query, and update fields in items, which matches query -// On sucess return number of update elements +// On success return number of update elements func (q *Query) Update() *Iterator { return q.UpdateCtx(context.Background()) } // UpdateCtx will execute query, and update fields in items, which matches query -// On sucess return number of update elements +// On success return number of update elements func (q *Query) UpdateCtx(ctx context.Context) *Iterator { if q.root != nil || len(q.joinQueries) != 0 { return errIterator(errors.New("Update does not support joined queries")) } if q.closed { - q.panicTrace("Update call on already closed query. You shoud create new Query") + q.panicTrace("Update call on already closed query. You should create new Query") } q.executed = true @@ -1132,7 +1132,7 @@ func (q *Query) LeftJoin(q2 *Query, field string) *Query { // Handler will be always set to the main query func (q *Query) JoinHandler(field string, handler JoinHandler) *Query { if q.root != nil { - // Joined queries can not have JoinHandlers themselfs. Routing this call to the root query if current query is joined + // Joined queries can not have JoinHandlers themselves. 
Routing this call to the root query if current query is joined for _, jq := range q.root.joinQueries { if q == jq { q.root.JoinHandler(field, handler) diff --git a/readme.md b/readme.md index 81e889d51..12bad0d61 100644 --- a/readme.md +++ b/readme.md @@ -19,7 +19,7 @@ about reindexer server and HTTP API refer to There are two LTS-versions of reindexer available: v3.x.x and v4.x.x. 3.x.x is currently our mainstream branch and 4.x.x (release/4 branch) is beta-version with experimental RAFT-cluster and sharding support. -Storages are compatible between those versions, however, replication configs are totally different. Versions 3 and 4 are geting all the same bugfixes and features (except replication-related ones). +Storages are compatible between those versions, however, replication configs are totally different. Versions 3 and 4 are getting all the same bugfixes and features (except replication-related ones). # Table of contents: @@ -273,24 +273,24 @@ String literals should be enclosed in single quotes. Composite indexes should be enclosed in double quotes. ```sql - SELECT * FROM items WHERE "field1+field2" = 'Vasya' +SELECT * FROM items WHERE "field1+field2" = 'Vasya' ``` If the field name does not start with alpha, '_' or '#' it must be enclosed in double quotes, examples: ```sql - UPDATE items DROP "123" +UPDATE items DROP "123" ``` ```sql - SELECT * FROM ns WHERE "123" = 'some_value' +SELECT * FROM ns WHERE "123" = 'some_value' ``` ```sql - SELECT * FROM ns WHERE "123abc" = 123 +SELECT * FROM ns WHERE "123abc" = 123 ``` ```sql - DELETE FROM ns WHERE "123abc123" = 111 +DELETE FROM ns WHERE "123abc123" = 111 ``` Simple Joins may be done via default SQL syntax: @@ -588,7 +588,7 @@ Go example: ``` SQL example: ```sql - SELECT * FROM items WHERE fields LIKE 'pattern' +SELECT * FROM items WHERE fields LIKE 'pattern' ``` 'me_t' corresponds to 'meet', 'meat', 'melt' and so on @@ -622,13 +622,13 @@ including functions like `now()`, `sec()` and `serial()`. To use expressions fro To make an array-field empty ```sql -UPDATE NS SET arrayfield = [] where id = 100 +UPDATE NS SET arrayfield = [] WHERE id = 100 ``` and set it to null ```sql -UPDATE NS SET field = null where id > 100 +UPDATE NS SET field = NULL WHERE id > 100 ``` In case of non-indexed fields, setting its value to a value of a different type will replace it completely; in case of indexed fields, it is only possible to convert it from adjacent type (integral types and bool), numeric strings (like "123456") to integral types and back. Setting indexed field to null resets it to a default value. @@ -636,13 +636,13 @@ In case of non-indexed fields, setting its value to a value of a different type It is possible to add new fields to existing items ```sql -UPDATE Ns set newField = 'Brand new!' where id > 100 +UPDATE NS SET newField = 'Brand new!' WHERE id > 100 ``` and even add a new field by a complex nested path like this ```sql -UPDATE Ns set nested.nested2.nested3.nested4.newField = 'new nested field!' where id > 100 +UPDATE NS SET nested.nested2.nested3.nested4.newField = 'new nested field!' WHERE id > 100 ``` will create the following nested objects: nested, nested2, nested3, nested4 and newField as a member of object nested4. @@ -680,7 +680,7 @@ In this case, `Map` in golang can only work with string as a key. 
`map[string]in Updating of object field by Sql statement: ```sql -UPDATE clients SET client_data = {"Name":"John Doe","Age":40,"Address":"Fifth Avenue, Manhattan","Occupation":"Bank Manager","TaxYear":1999,"TaxConsultant":"Jane Smith"} where id = 100; +UPDATE clients SET client_data = {"Name":"John Doe","Age":40,"Address":"Fifth Avenue, Manhattan","Occupation":"Bank Manager","TaxYear":1999,"TaxConsultant":"Jane Smith"} WHERE id = 100; ``` #### Remove field via update-query @@ -704,7 +704,7 @@ Reindexer update mechanism enables to modify array fields: to modify a certain i To update an item subscription operator syntax is used: ```sql -update ns set array[*].prices[0] = 9999 where id = 5 +UPDATE NS SET array[*].prices[0] = 9999 WHERE id = 5 ``` where `*` means all items. @@ -712,7 +712,7 @@ where `*` means all items. To update entire array the following is used: ```sql -update ns set prices = [999, 1999, 2999] where id = 9 +UPDATE NS SET prices = [999, 1999, 2999] WHERE id = 9 ``` any non-indexed field can be easily converted to array using this syntax. @@ -720,7 +720,7 @@ any non-indexed field can be easily converted to array using this syntax. Reindexer also allows to update items of object arrays: ```sql -update ns set extra.objects[0] = {"Id":0,"Description":"Updated!"} where id = 9 +UPDATE NS SET extra.objects[0] = {"Id":0,"Description":"Updated!"} WHERE id = 9 ``` also like this @@ -729,10 +729,27 @@ also like this db.Query("clients").Where("id", reindexer.EQ, 100).SetObject("extra.objects[0]", updatedValue).Update() ``` +Reindexer supports heterogeneous arrays: + +```sql +UPDATE NS SET info = ["hi", "bro", 111, 2.71] WHERE id = 9 +``` + +```golang + q := DB.Query(ns).Where("id", reindexer.EQ, 1).Set("array", []interface{}{"whatsup", 777, "bro"}) + res, err := q.Update().FetchAll() +``` + +Index array-fields support values that can be converted to an index type only. When saved, such values may change precision due to conversion. + +```sql +UPDATE NS SET prices_idx = [11, '2', 3] +``` + To remove item by index you should do the following: ```sql -update ns drop array[5] +UPDATE NS DROP array[5] ``` #### Concatenate arrays @@ -740,13 +757,13 @@ update ns drop array[5] To add items to an existing array the following syntax is supported: ```sql -update ns set integer_array = integer_array || [5,6,7,8] +UPDATE NS SET integer_array = integer_array || [5,6,7,8] ``` and ```sql -update ns set integer_array = [1,2,3,4,5] || integer_array +UPDATE NS SET integer_array = [1,2,3,4,5] || integer_array ``` The first one adds elements to the end of `integer_array`, the second one adds 5 items to the front of it. To make this code work in Golang `SetExpression()` should be used instead of `Set()`. @@ -756,20 +773,20 @@ The first one adds elements to the end of `integer_array`, the second one adds 5 To remove items by value into an existing array the following syntax is supported: ```sql -update ns set integer_array = array_remove(integer_array, [5,6,7,8]) +UPDATE NS SET integer_array = array_remove(integer_array, [5,6,7,8]) ``` and ```sql -update ns set integer_array = array_remove_once(integer_array, [5,6,7,8]) +UPDATE NS SET integer_array = array_remove_once(integer_array, [5,6,7,8,6]) ``` The first one removes all occurrences of the listed values in `integer_array`, the second one deletes only the first occurrence found. To make this code work in Golang `SetExpression()` should be used instead of `Set()`. If you need to remove one value, you can use square brackets `[5]` or simple value `5`. 
```sql -update ns set integer_array = array_remove(integer_array, [5]) +UPDATE NS SET integer_array = array_remove(integer_array, [5]) ``` ```sql update ns set integer_array = array_remove(integer_array, 5) @@ -778,7 +795,7 @@ update ns set integer_array = array_remove(integer_array, 5) Remove command can be combined with array concatenate: ```sql -update ns set integer_array = array_remove_once(integer_array, [5,6,7,8]) || [1,2,3] +UPDATE NS SET integer_array = array_remove_once(integer_array, [5,6,7,8]) || [1,2,3] ``` also like this @@ -791,10 +808,10 @@ It is possible to remove the values of the second field from the values of the f Note: The first parameter in commands is expected to be an array/field-array, the second parameter can be an array/scalar/field-array/field-scalar. For values compatibility/convertibility required ```sql -update ns set integer_array = [3] || array_remove(integer_array, integer_array2) || integer_array3 || array_remove_once(integer_array, [8,1]) || [2,4] +UPDATE NS SET integer_array = [3] || array_remove(integer_array, integer_array2) || integer_array3 || array_remove_once(integer_array, [8,1]) || [2,4] ``` ```sql -update ns set integer_array = array_remove(integer_array, integer_array2) || array_remove(integer_array, integer_array3) || array_remove_once(integer_array, [33,777]) +UPDATE NS SET integer_array = array_remove(integer_array, integer_array2) || array_remove(integer_array, integer_array3) || array_remove_once(integer_array, [33,777]) ``` ```golang @@ -809,7 +826,6 @@ For RPC clients there is transactions count limitation - each connection can't h #### Synchronous mode ```go - // Create new transaction object tx, err := db.BeginTx("items"); if err != nil { @@ -823,7 +839,6 @@ For RPC clients there is transactions count limitation - each connection can't h if err := tx.Commit(); err != nil { panic(err) } - ``` #### Async batch mode @@ -831,7 +846,6 @@ For RPC clients there is transactions count limitation - each connection can't h For speed up insertion of bulk records async mode can be used. ```go - // Create new transaction object tx, err := db.BeginTx("items"); if err != nil { @@ -1068,7 +1082,6 @@ type Item struct { // Composite index _ struct{} `reindex:"id+sub_id+sub_sub_id,,composite,pk"` } - ``` Also composite indexes are useful for sorting results by multiple fields: @@ -1142,7 +1155,6 @@ of results. 
Example code for aggregate `items` by `price` and `name` ```go - query := db.Query("items") query.AggregateMax("price") query.AggregateFacet("name", "price").Sort("name", true).Sort("count", false).Offset(10).Limit(100) @@ -1167,11 +1179,9 @@ Example code for aggregate `items` by `price` and `name` for _, facet := range aggFacetRes.Facets { fmt.Printf ("'%s' '%s' -> %d", facet.Values[0], facet.Values[1], facet.Count) } - ``` ```go - query := db.Query("items") query.Distinct("name").Distinct("price") iterator := query.Exec() @@ -1203,12 +1213,12 @@ For instance, we've got an array of structures: ```go type Elem struct { - F1 int `reindex:"f1"` - F2 int `reindex:"f2"` + F1 int `reindex:"f1"` + F2 int `reindex:"f2"` } type A struct { - Elems []Elem + Elems []Elem } ``` @@ -1257,15 +1267,14 @@ These functions can be passed to Upsert/Insert/Update in 3-rd and next arguments If these functions are provided, the passed by reference item will be changed to updated value ```go - // set ID field from serial generator - db.Insert ("items",&item,"id=serial()") - - // set current timestamp in nanoseconds to updated_at field - db.Update ("items",&item,"updated_at=now(NSEC)") + // set ID field from serial generator + db.Insert ("items",&item,"id=serial()") - // set current timestamp and ID - db.Upsert ("items",&item,"updated_at=now(NSEC)","id=serial()") + // set current timestamp in nanoseconds to updated_at field + db.Update ("items",&item,"updated_at=now(NSEC)") + // set current timestamp and ID + db.Upsert ("items",&item,"updated_at=now(NSEC)","id=serial()") ``` ### Expire Data from Namespace by Setting TTL @@ -1277,12 +1286,12 @@ Reindexer makes it possible to set TTL (time to live) for Namespace items. Addin Ttl indexes work only with int64 fields and store UNIX timestamp data. Items containing ttl index expire after `expire_after` seconds. Example of declaring TtlIndex in Golang: ```go - type NamespaceExample struct { - ID int `reindex:"id,,pk" json:"id"` - Date int64 `reindex:"date,ttl,,expire_after=3600" json:"date"` - } - ... - ns.Date = time.Now().Unix() +type NamespaceExample struct { + ID int `reindex:"id,,pk" json:"id"` + Date int64 `reindex:"date,ttl,,expire_after=3600" json:"date"` +} +... + ns.Date = time.Now().Unix() ``` In this case items of namespace NamespaceExample expire in 3600 seconds after NamespaceExample.Date field value (which is UNIX timestamp). @@ -1602,9 +1611,9 @@ Python version >=3.6 is required. ### Reindexer for Java -- *Support modes*: standalone, builtin, builtinserver +- *Support modes*: standalone, builtin, builtin-server - *API Used*: binary ABI, cproto -- *Dependency on reindexer library (reindexer-dev package):* yes, for builtin & builtinserver +- *Dependency on reindexer library (reindexer-dev package):* yes, for builtin & builtin-server Reindexer for java is official connector, and maintained by Reindexer's team. It supports both builtin and standalone modes. For enable builtin mode support reindexer-dev (version >= 3.1.0) should be installed. See [installation instructions](cpp_src/readme.md#Installation) for details. 
@@ -1612,9 +1621,9 @@ For enable builtin mode support reindexer-dev (version >= 3.1.0) should be insta For install reindexer to Java or Kotlin project add the following lines to maven project file ``` -    <groupId>com.github.restream</groupId> -    <artifactId>rx-connector</artifactId> -    <version>[LATEST_VERSION]</version> +  <groupId>com.github.restream</groupId> +  <artifactId>rx-connector</artifactId> +  <version>[LATEST_VERSION]</version> ``` URL: https://github.com/Restream/reindexer-java diff --git a/test/dsl_test.go b/test/dsl_test.go index ae2740b19..35d317475 100644 --- a/test/dsl_test.go +++ b/test/dsl_test.go @@ -19,7 +19,7 @@ type TestDSLItem struct { type TestDSLFtItem struct { ID int `reindex:"id,,pk"` - Description string `reindex:"description,text"` + Description string `reindex:"description,text,dense"` } type TestDSLJoinItem struct { diff --git a/test/update_test.go b/test/update_test.go index 91be7ae7d..fb48654ab 100644 --- a/test/update_test.go +++ b/test/update_test.go @@ -16,8 +16,10 @@ const ( fieldsUpdateNs = "test_items_fields_update" truncateNs = "test_truncate" removeItemsNs = "test_remove_items" - sparseArrItemNs = "sparse_array_updates" - TestUpdateWithExpressionsNs = "test_update_with_expressions_ns" + sparseArrItemNs = "test_sparse_array_update" + TestUpdateWithExpressionsNs = "test_expressions_updates" + TestUpdateHeteroArraysNs = "test_heterogeneous_array_updates" + TestUpdateHeteroArraysObjNs = "test_heterogeneous_objects_array_updates" ) type ItemWithSparseArray struct { ID int64 `json:"id" reindex:"id,hash,pk"` Array []int64 `json:"array_idx" reindex:"array_idx,hash,sparse"` } +type ItemWithHeteroArrays struct { + ID int64 `json:"id" reindex:"id,hash,pk"` + ArrayIdx []int64 `json:"array_idx" reindex:"array_idx,hash,sparse"` + ArrayNon []interface{} `json:"array_hetero"` +} + +type Nested struct { + Field int `json:"field" reindex:"array_idx,-"` +} + +type ItemWithHeteroArraysObj struct { + ID int64 `json:"id" reindex:"id,hash,pk"` + Nested []Nested `json:"nested"` + ArrayNon []interface{} `json:"array_nonidx"` +} + func init() { tnamespaces["test_items_insert_update"] = TestItemSimple{} tnamespaces[sparseArrItemNs] = ItemWithSparseArray{} tnamespaces[TestUpdateWithExpressionsNs] = ItemWithSparseArray{} + tnamespaces[TestUpdateHeteroArraysNs] = ItemWithHeteroArrays{} + tnamespaces[TestUpdateHeteroArraysObjNs] = ItemWithHeteroArraysObj{} } var checkInsertUpdateExistsData = []*TestItemSimple{ @@ -420,7 +440,7 @@ func CheckUpdateArrayObject(t *testing.T) { Fourth: fourth, } - // Update objects[0].nested[0] witn new value (set as JSON) + // Update objects[0].nested[0] with new value (set as JSON) objJson, err := json.Marshal(obj) require.NoError(t, err) results := UpdateObjectJSON(t, "objects[0].nested[0]", objJson) @@ -445,7 +465,7 @@ } // Check of simultaneous update of 2 fields: object field + indexed field -func CheckSimultaniousUpdateOfFields(t *testing.T) { +func CheckSimultaneousUpdateOfFields(t *testing.T) { // Generate new value for the object field obj := randTestItemObject() objJson, err := json.Marshal(obj) @@ -725,7 +745,7 @@ func CheckNonIndexedArrayItemUpdate2(t *testing.T) { array := results[i].(*TestItemComplexObject).Objects for j := 0; j < len(array); j++ { for k := 0; k < len(array[j].Nested); k++ { - // Make sure it's size is correect + // Make sure it's size is correct equal := (len(array[j].Nested[k].Fourth) == 10) if equal { for l := 0; l < len(array[j].Nested[k].Fourth); l++ { @@ -1052,7 +1072,7 @@ func TestUpdateSparseArrayIndex(t *testing.T) { checkResultItem(t, results, emptyItem) } -func
TestUpdateWithExpressions(t *testing.T) { +func TestUpdateExpressionWithArrayRemove(t *testing.T) { t.Parallel() const ns = TestUpdateWithExpressionsNs @@ -1091,3 +1111,58 @@ func TestUpdateWithExpressions(t *testing.T) { }) } + +func TestUpdateSetHeterogeneousArray(t *testing.T) { + t.Parallel() + + t.Run("update with heterogeneous array", func(t *testing.T) { + ns := TestUpdateHeteroArraysNs + item := &ItemWithHeteroArrays{ID: 1, ArrayIdx: []int64{1, 2, 3}, ArrayNon: []interface{}{3.14, "hi", "bro", 111}} + require.NoError(t, DB.Upsert(ns, item)) + + updateArr := []interface{}{"777", 333, "555"} + q := DB.Query(ns).Where("id", reindexer.EQ, 1).Set("array_idx", updateArr) + _, err := q.Update().FetchAll() + require.NoError(t, err) + + updateArrNon := []interface{}{"whatsup", 111, "bro"} + q = DB.Query(ns).Where("id", reindexer.EQ, 1).Set("array_hetero", updateArrNon) + _, err = q.Update().FetchAll() + require.NoError(t, err) + + selectText := "SELECT * FROM " + ns + " WHERE id = 1" + arrayIdxExpected := []int64{777, 333, 555} + expected := &ItemWithHeteroArrays{ID: 1, ArrayIdx: arrayIdxExpected, ArrayNon: updateArrNon} + checkResultItem(t, DB.ExecSQL(selectText), expected) + }) + + t.Run("update with heterogeneous objects array", func(t *testing.T) { + ns := TestUpdateHeteroArraysObjNs + item := &ItemWithHeteroArraysObj{ + ID: 1, + Nested: []Nested{{Field: 1}, {Field: 2}}, + ArrayNon: []interface{}{"a", map[string]int{"field": 1}, 3}, + } + require.NoError(t, DB.Upsert(ns, item)) + + // indexed arr + updateIdxArr := []interface{}{map[string]int{"field": 10}, map[string]string{"field": "20"}} + q := DB.Query(ns).Where("id", reindexer.EQ, 1).SetObject("nested", updateIdxArr) + _, err := q.Update().FetchAll() + require.NoError(t, err) + + // nonidx arr + updateNonidxArr := []interface{}{map[string]int{"field": 111}, map[string]string{"field": "abc"}} + q = DB.Query(ns).Where("id", reindexer.EQ, 1).SetObject("array_nonidx", updateNonidxArr) + _, err = q.Update().FetchAll() + require.NoError(t, err) + + selectText := "SELECT * FROM " + ns + " WHERE id = 1" + expected := &ItemWithHeteroArraysObj{ + ID: 1, + Nested: []Nested{{Field: 10}, {Field: 20}}, + ArrayNon: []interface{}{map[string]interface{}{"field": 111}, map[string]interface{}{"field": "abc"}}, + } + checkResultItem(t, DB.ExecSQL(selectText), expected) + }) +}
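For reference, below is a minimal, self-contained sketch of the heterogeneous-array update API exercised by the tests above. The `Doc` struct, the `docs` namespace and the `builtin://` storage path are illustrative assumptions and are not part of this patch; the `Set`/`Update`/`FetchAll` chain mirrors the Go connector calls used in the tests.

```go
package main

import (
	"fmt"

	"github.com/restream/reindexer/v3"
)

// Doc is a hypothetical item type: Info is a non-indexed field,
// so after an update it may hold values of mixed types.
type Doc struct {
	ID   int64         `json:"id" reindex:"id,hash,pk"`
	Info []interface{} `json:"info"`
}

func main() {
	// The builtin storage path here is an illustrative assumption.
	db := reindexer.NewReindex("builtin:///tmp/reindex_hetero_sketch")
	if err := db.OpenNamespace("docs", reindexer.DefaultNamespaceOptions(), Doc{}); err != nil {
		panic(err)
	}
	if err := db.Upsert("docs", &Doc{ID: 1, Info: []interface{}{"initial"}}); err != nil {
		panic(err)
	}

	// Equivalent of: UPDATE docs SET info = ["hi", "bro", 111, 2.71] WHERE id = 1
	res, err := db.Query("docs").
		Where("id", reindexer.EQ, 1).
		Set("info", []interface{}{"hi", "bro", 111, 2.71}).
		Update().FetchAll()
	if err != nil {
		panic(err)
	}
	fmt.Println(res[0].(*Doc).Info) // [hi bro 111 2.71]
}
```

Since `info` is non-indexed, the mixed types are stored as-is; for an indexed array field the same call would convert each value to the index type (or fail if a value is not convertible), as the `array_idx` cases in the tests above demonstrate.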