diff --git a/bindings/builtin/builtin.go b/bindings/builtin/builtin.go index bfd8b17ed..59eacc4a9 100644 --- a/bindings/builtin/builtin.go +++ b/bindings/builtin/builtin.go @@ -31,8 +31,13 @@ type Logger interface { Printf(level int, fmt string, msg ...interface{}) } -var logger Logger +// Separate mutexes for logger object itself and for reindexer_enable_logger call: +// logMtx provides safe access to the logger +// logEnableMtx provides atomic logic for (enable + set) and (disable + reset) procedures var logMtx sync.RWMutex +var logEnableMtx sync.Mutex +var logger Logger + var enableDebug bool var bufPool sync.Pool @@ -600,18 +605,24 @@ func CGoLogger(level int, msg string) { } } -func (binding *Builtin) EnableLogger(log bindings.Logger) { +func (binding *Builtin) setLogger(log bindings.Logger) { logMtx.Lock() defer logMtx.Unlock() logger = log +} + +func (binding *Builtin) EnableLogger(log bindings.Logger) { + logEnableMtx.Lock() + defer logEnableMtx.Unlock() + binding.setLogger(log) C.reindexer_enable_go_logger() } func (binding *Builtin) DisableLogger() { - logMtx.Lock() - defer logMtx.Unlock() + logEnableMtx.Lock() + defer logEnableMtx.Unlock() C.reindexer_disable_go_logger() - logger = nil + binding.setLogger(nil) } func (binding *Builtin) ReopenLogFiles() error { diff --git a/bindings/builtinserver/config/config.go b/bindings/builtinserver/config/config.go index 30ffe0516..4ae1d7b99 100644 --- a/bindings/builtinserver/config/config.go +++ b/bindings/builtinserver/config/config.go @@ -13,11 +13,20 @@ type StorageConf struct { Autorepair bool `yaml:"autorepair"` } +const ServerThreadingDedicated = "dedicated" +const ServerThreadingShared = "shared" + type NetConf struct { - HTTPAddr string `yaml:"httpaddr"` - RPCAddr string `yaml:"rpcaddr"` - WebRoot string `yaml:"webroot"` - Security bool `yaml:"security"` + HTTPAddr string `yaml:"httpaddr"` + HTTPThreading string `yaml:"http_threading"` // "dedicated" or "shared" + RPCAddr string `yaml:"rpcaddr"` + RPCThreading string `yaml:"rpc_threading"` // "dedicated" or "shared" + UnixRPCAddr string `yaml:"urpcaddr"` + UnixRPCThreading string `yaml:"urpc_threading"` // "dedicated" or "shared" + WebRoot string `yaml:"webroot"` + Security bool `yaml:"security"` + HttpReadTimeoutSec int `yaml:"http_read_timeout,omitempty"` + HttpWriteTimeoutSec int `yaml:"http_write_timeout,omitempty"` } type LoggerConf struct { @@ -69,9 +78,11 @@ func DefaultServerConfig() *ServerConfig { Autorepair: false, }, Net: NetConf{ - HTTPAddr: "0.0.0.0:9088", - RPCAddr: "0.0.0.0:6534", - Security: false, + HTTPAddr: "0.0.0.0:9088", + HTTPThreading: "shared", + RPCAddr: "0.0.0.0:6534", + RPCThreading: "shared", + Security: false, }, Logger: LoggerConf{ ServerLog: "stdout", diff --git a/bindings/cproto/connection.go b/bindings/cproto/connection.go index 3a9d165d8..136b316e9 100644 --- a/bindings/cproto/connection.go +++ b/bindings/cproto/connection.go @@ -198,12 +198,20 @@ func (c *connection) deadlineTicker() { } func (c *connection) connect(ctx context.Context) (err error) { + dsn := c.owner.getActiveDSN() var d net.Dialer - c.conn, err = d.DialContext(ctx, "tcp", c.owner.getActiveDSN().Host) - if err != nil { - return err + if dsn.Scheme == "cproto" { + if c.conn, err = d.DialContext(ctx, "tcp", dsn.Host); err != nil { + return err + } + c.conn.(*net.TCPConn).SetNoDelay(true) + } else { + d.LocalAddr = nil + if c.conn, err = d.DialContext(ctx, "unix", dsn.Host); err != nil { + return err + } } - c.conn.(*net.TCPConn).SetNoDelay(true) + c.rdBuf = 
bufio.NewReaderSize(c.conn, bufsCap) go c.writeLoop() diff --git a/bindings/cproto/cproto.go b/bindings/cproto/cproto.go index bc6ab855b..d8feb5406 100644 --- a/bindings/cproto/cproto.go +++ b/bindings/cproto/cproto.go @@ -8,6 +8,8 @@ import ( "math" "net" "net/url" + "runtime" + "strings" "sync" "sync/atomic" "time" @@ -32,6 +34,9 @@ var logMtx sync.RWMutex func init() { bindings.RegisterBinding("cproto", new(NetCProto)) + if runtime.GOOS != "windows" { + bindings.RegisterBinding("ucproto", new(NetCProto)) + } } type Logger interface { @@ -122,6 +127,16 @@ func (binding *NetCProto) Init(u []url.URL, options ...interface{}) (err error) } binding.dsn.url = u + for i := 0; i < len(binding.dsn.url); i++ { + if binding.dsn.url[i].Scheme == "ucproto" { + addrs := strings.Split(binding.dsn.url[i].Path, ":") + if len(addrs) != 2 { + return fmt.Errorf("rq: unexpected URL format for ucproto: '%s'. Expecting ':/", binding.dsn.url[i].Path) + } + binding.dsn.url[i].Host = addrs[0] + binding.dsn.url[i].Path = addrs[1] + } + } binding.connectDSN(context.Background(), connPoolSize, connPoolLBAlgorithm) binding.termCh = make(chan struct{}) go binding.pinger() diff --git a/bindings/interface.go b/bindings/interface.go index 221296b78..d87a9b442 100644 --- a/bindings/interface.go +++ b/bindings/interface.go @@ -120,7 +120,7 @@ type TxCtx struct { UserCtx context.Context } -// FetchMore interface for partial loading results (used in cproto) +// FetchMore interface for partial loading results (used in cproto/ucproto) type FetchMore interface { Fetch(ctx context.Context, offset, limit int, asJson bool) (err error) } @@ -248,7 +248,7 @@ const ( LBPowerOfTwoChoices ) -// OptionConnPoolLoadBalancing sets algorithm, which will be used to choose connection for cproto requests' balancing +// OptionConnPoolLoadBalancing sets algorithm, which will be used to choose connection for cproto/ucproto requests' balancing type OptionConnPoolLoadBalancing struct { Algorithm LoadBalancingAlgorithm } diff --git a/changelog.md b/changelog.md index 9c3b0da69..479e723fb 100644 --- a/changelog.md +++ b/changelog.md @@ -1905,5 +1905,3 @@ - [ref] EnableStorage method was deprecated - [fix] Query builder did not reset opOR after InnerJoin -## Misc - diff --git a/cjson/decoder.go b/cjson/decoder.go index 3cc9bd183..dc9667ced 100644 --- a/cjson/decoder.go +++ b/cjson/decoder.go @@ -711,7 +711,7 @@ func (dec *Decoder) Decode(cjson []byte, dest interface{}) (err error) { } }() - fieldsoutcnt := make([]int, 64, 64) + fieldsoutcnt := make([]int, MaxIndexes) ctagsPath := make([]int, 0, 8) dec.decodeValue(nil, ser, reflect.ValueOf(dest), fieldsoutcnt, ctagsPath) diff --git a/cpp_src/CMakeLists.txt b/cpp_src/CMakeLists.txt index df73c53cd..5d84a74a6 100644 --- a/cpp_src/CMakeLists.txt +++ b/cpp_src/CMakeLists.txt @@ -89,6 +89,10 @@ endif () set (EXTRA_FLAGS "") +if (WITH_ASAN AND WITH_TSAN) + message(FATAL_ERROR "You cannot use the ASAN and TSAN options at the same time, CMake will exit.") +endif() + if (WITH_ASAN) set (EXTRA_FLAGS "-fsanitize=address") add_definitions(-DREINDEX_WITH_ASAN) @@ -272,6 +276,9 @@ else() list(APPEND SRCS ${KOISHI_PATH}/fcontext/fcontext.c ${KOISHI_PATH}/fcontext/fcontext.hpp) endif() +# Static LevelDB v1.23 is built with -fno-rtti by default. 
To inherit our logger from leveldb's logger, this file must be built with -fno-rtti to +set_source_files_properties(${REINDEXER_SOURCE_PATH}/core/storage/leveldblogger.cc PROPERTIES COMPILE_FLAGS "-fno-rtti") + list(APPEND REINDEXER_LIBRARIES reindexer) add_library(${TARGET} STATIC ${HDRS} ${SRCS} ${VENDORS}) add_definitions(-DREINDEX_CORE_BUILD=1) @@ -695,6 +702,7 @@ if (NOT WIN32) SET(CMAKE_INSTALL_DEFAULT_COMPONENT_NAME "server") SET(DIST_INCLUDE_FILES "tools/errors.h" "tools/serializer.h" "tools/varint.h" "tools/stringstools.h" "tools/customhash.h" "tools/assertrx.h" "tools/jsonstring.h" + "tools/verifying_updater.h" "core/reindexer.h" "core/type_consts.h" "core/item.h" "core/payload/payloadvalue.h" "core/payload/payloadiface.h" "core/indexopts.h" "core/namespacedef.h" "core/keyvalue/variant.h" "core/keyvalue/geometry.h" "core/sortingprioritiestable.h" "core/rdxcontext.h" "core/activity_context.h" "core/type_consts_helpers.h" "core/payload/fieldsset.h" "core/payload/payloadtype.h" @@ -702,8 +710,8 @@ if (NOT WIN32) "core/query/query.h" "core/query/queryentry.h" "core/queryresults/queryresults.h" "core/indexdef.h" "core/queryresults/aggregationresult.h" "core/queryresults/itemref.h" "core/namespace/stringsholder.h" "core/keyvalue/key_string.h" "core/key_value_type.h" "core/keyvalue/uuid.h" "core/expressiontree.h" "core/lsn.h" "core/cjson/tagspath.h" "core/cjson/ctag.h" - "estl/cow.h" "estl/overloaded.h" "estl/one_of.h" "estl/h_vector.h" "estl/mutex.h" "estl/intrusive_ptr.h" "estl/trivial_reverse_iterator.h" - "estl/span.h" "estl/chunk.h" "estl/fast_hash_traits.h" "estl/debug_macros.h" "estl/defines.h" + "estl/cow.h" "estl/overloaded.h" "estl/one_of.h" "estl/h_vector.h" "estl/mutex.h" "estl/intrusive_ptr.h" "estl/trivial_reverse_iterator.h" + "estl/span.h" "estl/chunk.h" "estl/fast_hash_traits.h" "estl/debug_macros.h" "estl/defines.h" "client/reindexer.h" "client/item.h" "client/reindexerconfig.h" "client/queryresults.h" "client/resultserializer.h" "client/internalrdxcontext.h" "client/transaction.h" "client/cororeindexer.h" "client/coroqueryresults.h" "client/corotransaction.h" diff --git a/cpp_src/client/cororeindexer.cc b/cpp_src/client/cororeindexer.cc index 4cb747cf5..4794f2ae6 100644 --- a/cpp_src/client/cororeindexer.cc +++ b/cpp_src/client/cororeindexer.cc @@ -33,7 +33,7 @@ CoroReindexer& CoroReindexer::operator=(CoroReindexer&& rdx) noexcept { Error CoroReindexer::Connect(const std::string& dsn, dynamic_loop& loop, const client::ConnectOpts& opts) { return impl_->Connect(dsn, loop, opts); } -Error CoroReindexer::Stop() { return impl_->Stop(); } +void CoroReindexer::Stop() { impl_->Stop(); } Error CoroReindexer::AddNamespace(const NamespaceDef& nsDef) { return impl_->AddNamespace(nsDef, ctx_); } Error CoroReindexer::OpenNamespace(std::string_view nsName, const StorageOpts& storage) { return impl_->OpenNamespace(nsName, ctx_, storage); diff --git a/cpp_src/client/cororeindexer.h b/cpp_src/client/cororeindexer.h index da211a6aa..25ef11f6f 100644 --- a/cpp_src/client/cororeindexer.h +++ b/cpp_src/client/cororeindexer.h @@ -41,12 +41,13 @@ class CoroReindexer { CoroReindexer &operator=(CoroReindexer &&) noexcept; /// Connect - connect to reindexer server - /// @param dsn - uri of server and database, like: `cproto://user@password:127.0.0.1:6534/dbname` + /// @param dsn - uri of server and database, like: `cproto://user@password:127.0.0.1:6534/dbname` or + /// `ucproto://user@password:/tmp/reindexer.sock:/dbname` /// @param loop - event loop for connections and coroutines handling /// 
@param opts - Connect options. May contaion any of
Error Connect(const std::string &dsn, dynamic_loop &loop, const client::ConnectOpts &opts = client::ConnectOpts()); /// Stop - shutdown connector - Error Stop(); + void Stop(); /// Open or create namespace /// @param nsName - Name of namespace /// @param opts - Storage options. Can be one of
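Note on the hunk above: `cororeindexer.h` now documents the unix-socket DSN form (`ucproto://user@password:/tmp/reindexer.sock:/dbname`, i.e. socket path and database name separated by `:`), and `Stop()` no longer returns an `Error`. A minimal sketch of how a caller might drive the coroutine client with the new scheme is below. The include paths, the `loop.spawn()`/`loop.run()` driving pattern and the `mydb` name are assumptions for illustration, not part of this diff; the `Connect`/`Status`/`Stop` signatures follow the declarations in the patch.

```cpp
// Sketch only: connect CoroReindexer over a unix-domain socket (ucproto).
// Header paths and the spawn()/run() loop usage are assumptions based on how
// the client code in this patch drives ev::dynamic_loop.
#include <iostream>
#include "client/cororeindexer.h"
#include "net/ev/ev.h"

int main() {
	reindexer::net::ev::dynamic_loop loop;
	loop.spawn([&loop] {
		reindexer::client::CoroReindexer rx;
		// ucproto DSN: '<unix socket path>:/<dbname>' instead of 'host:port/dbname'
		auto err = rx.Connect("ucproto://user@password:/tmp/reindexer.sock:/mydb", loop,
							  reindexer::client::ConnectOpts().CreateDBIfMissing());
		if (err.ok()) {
			err = rx.Status();
		}
		if (!err.ok()) {
			std::cerr << "ERROR: " << err.what() << std::endl;
		}
		rx.Stop();	// shutdown connector; returns void after this change
	});
	loop.run();
	return 0;
}
```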
diff --git a/cpp_src/client/cororpcclient.cc b/cpp_src/client/cororpcclient.cc index 91ab0114b..feb8cbb00 100644 --- a/cpp_src/client/cororpcclient.cc +++ b/cpp_src/client/cororpcclient.cc @@ -26,6 +26,7 @@ CoroRPCClient::CoroRPCClient(const ReindexerConfig& config) : config_(config) { CoroRPCClient::~CoroRPCClient() { Stop(); } Error CoroRPCClient::Connect(const std::string& dsn, ev::dynamic_loop& loop, const client::ConnectOpts& opts) { + using namespace std::string_view_literals; if (conn_.IsRunning()) { return Error(errLogic, "Client is already started"); } @@ -34,9 +35,15 @@ Error CoroRPCClient::Connect(const std::string& dsn, ev::dynamic_loop& loop, con if (!connectData.uri.parse(dsn)) { return Error(errParams, "%s is not valid uri", dsn); } - if (connectData.uri.scheme() != "cproto") { +#ifdef _WIN32 + if (connectData.uri.scheme() != "cproto"sv) { return Error(errParams, "Scheme must be cproto"); } +#else + if (connectData.uri.scheme() != "cproto"sv && connectData.uri.scheme() != "ucproto"sv) { + return Error(errParams, "Scheme must be either cproto or ucproto"); + } +#endif connectData.opts = cproto::CoroClientConnection::Options( config_.ConnectTimeout, config_.RequestTimeout, opts.IsCreateDBIfMissing(), opts.HasExpectedClusterID(), opts.ExpectedClusterID(), config_.ReconnectAttempts, config_.EnableCompression, config_.RequestDedicatedThread, config_.AppName); @@ -46,13 +53,12 @@ Error CoroRPCClient::Connect(const std::string& dsn, ev::dynamic_loop& loop, con return errOK; } -Error CoroRPCClient::Stop() { +void CoroRPCClient::Stop() { terminate_ = true; conn_.Stop(); resubWg_.wait(); loop_ = nullptr; terminate_ = false; - return errOK; } Error CoroRPCClient::AddNamespace(const NamespaceDef& nsDef, const InternalRdxContext& ctx) { @@ -236,7 +242,7 @@ Error CoroRPCClient::Delete(const Query& query, CoroQueryResults& result, const query.Serialize(ser); NsArray nsArray; - query.WalkNested(true, true, [this, &nsArray](const Query& q) { nsArray.push_back(getNamespace(q._namespace)); }); + query.WalkNested(true, true, [this, &nsArray](const Query& q) { nsArray.push_back(getNamespace(q.NsName())); }); result = CoroQueryResults(&conn_, std::move(nsArray), 0, config_.FetchAmount, config_.RequestTimeout); @@ -257,7 +263,7 @@ Error CoroRPCClient::Update(const Query& query, CoroQueryResults& result, const query.Serialize(ser); NsArray nsArray; - query.WalkNested(true, true, [this, &nsArray](const Query& q) { nsArray.push_back(getNamespace(q._namespace)); }); + query.WalkNested(true, true, [this, &nsArray](const Query& q) { nsArray.push_back(getNamespace(q.NsName())); }); result = CoroQueryResults(&conn_, std::move(nsArray), 0, config_.FetchAmount, config_.RequestTimeout); @@ -322,7 +328,7 @@ Error CoroRPCClient::selectImpl(const Query& query, CoroQueryResults& result, se } NsArray nsArray; query.Serialize(qser); - query.WalkNested(true, true, [this, &nsArray](const Query& q) { nsArray.push_back(getNamespace(q._namespace)); }); + query.WalkNested(true, true, [this, &nsArray](const Query& q) { nsArray.push_back(getNamespace(q.NsName())); }); h_vector vers; for (auto& ns : nsArray) { vers.push_back(ns->tagsMatcher_.version() ^ ns->tagsMatcher_.stateToken()); diff --git a/cpp_src/client/cororpcclient.h b/cpp_src/client/cororpcclient.h index 8c217aa3d..50278e8fd 100644 --- a/cpp_src/client/cororpcclient.h +++ b/cpp_src/client/cororpcclient.h @@ -36,7 +36,7 @@ class CoroRPCClient { ~CoroRPCClient(); Error Connect(const std::string &dsn, ev::dynamic_loop &loop, const client::ConnectOpts &opts); - 
Error Stop(); + void Stop(); Error OpenNamespace(std::string_view nsName, const InternalRdxContext &ctx, const StorageOpts &opts = StorageOpts().Enabled().CreateIfMissing()); diff --git a/cpp_src/client/item.h b/cpp_src/client/item.h index 3f1bbd684..618398a5c 100644 --- a/cpp_src/client/item.h +++ b/cpp_src/client/item.h @@ -60,13 +60,13 @@ class Item { /// Get status of item /// @return data slice with JSON. Returned slice is allocated in temporary Item's buffer, and can be invalidated by any next operation /// with Item - Error Status() { return status_; } + Error Status() const noexcept { return status_; } /// Get internal ID of item /// @return ID of item int GetID() const noexcept { return id_; } - /// Get internal version of item - /// @return version of item - int NumFields(); + /// Get count of indexed fields + /// @return count of indexed fields + int NumFields() const noexcept; /// Set additional percepts for modify operation /// @param precepts - strings in format "fieldName=Func()" void SetPrecepts(const std::vector &precepts); diff --git a/cpp_src/client/itemimpl.cc b/cpp_src/client/itemimpl.cc index 3d33bf1db..3365280fd 100644 --- a/cpp_src/client/itemimpl.cc +++ b/cpp_src/client/itemimpl.cc @@ -48,7 +48,7 @@ void ItemImpl::FromCJSON(std::string_view slice) { Payload pl = GetPayload(); CJsonDecoder decoder(tagsMatcher_); ser_.Reset(); - decoder.Decode(pl, rdser, ser_); + decoder.Decode<>(pl, rdser, ser_); if (!rdser.Eof() && rdser.Pos() != tmOffset) { throw Error(errParseJson, "Internal error - left unparsed data %d", rdser.Pos()); diff --git a/cpp_src/client/queryresults.cc b/cpp_src/client/queryresults.cc index 7f0b070b1..83b1755c3 100644 --- a/cpp_src/client/queryresults.cc +++ b/cpp_src/client/queryresults.cc @@ -114,8 +114,6 @@ void QueryResults::fetchNextResults() { rawResult_.assign(rawResult.begin() + ser.Pos(), rawResult.end()); } -QueryResults::~QueryResults() {} - h_vector QueryResults::GetNamespaces() const { h_vector ret; ret.reserve(nsArray_.size()); @@ -310,8 +308,5 @@ QueryResults::Iterator &QueryResults::Iterator::operator++() { return *this; } -bool QueryResults::Iterator::operator!=(const Iterator &other) const { return idx_ != other.idx_; } -bool QueryResults::Iterator::operator==(const Iterator &other) const { return idx_ == other.idx_; } - } // namespace client } // namespace reindexer diff --git a/cpp_src/client/queryresults.h b/cpp_src/client/queryresults.h index 97c2a1835..e8b059568 100644 --- a/cpp_src/client/queryresults.h +++ b/cpp_src/client/queryresults.h @@ -26,7 +26,7 @@ class QueryResults { QueryResults(int fetchFlags = 0); QueryResults(const QueryResults&) = delete; QueryResults(QueryResults&&) noexcept; - ~QueryResults(); + ~QueryResults() = default; QueryResults& operator=(const QueryResults&) = delete; QueryResults& operator=(QueryResults&& obj) noexcept; @@ -40,10 +40,10 @@ class QueryResults { bool IsRaw(); std::string_view GetRaw(); Iterator& operator++(); - Error Status() { return qr_->status_; } - bool operator!=(const Iterator&) const; - bool operator==(const Iterator&) const; - Iterator& operator*() { return *this; } + Error Status() const noexcept { return qr_->status_; } + bool operator==(const Iterator& other) const noexcept { return idx_ == other.idx_; } + bool operator!=(const Iterator& other) const noexcept { return !operator==(other); } + Iterator& operator*() noexcept { return *this; } void readNext(); void getJSONFromCJSON(std::string_view cjson, WrSerializer& wrser, bool withHdrLen = true); @@ -55,13 +55,15 @@ class 
QueryResults { Iterator begin() const { return Iterator{this, 0, 0, 0, {}}; } Iterator end() const { return Iterator{this, queryParams_.qcount, 0, 0, {}}; } - size_t Count() const { return queryParams_.qcount; } - int TotalCount() const { return queryParams_.totalcount; } - bool HaveRank() const { return queryParams_.flags & kResultsWithRank; } - bool NeedOutputRank() const { return queryParams_.flags & kResultsNeedOutputRank; } - const std::string& GetExplainResults() const { return queryParams_.explainResults; } - const std::vector& GetAggregationResults() const { return queryParams_.aggResults; } - Error Status() { return status_; } + size_t Count() const noexcept { return queryParams_.qcount; } + int TotalCount() const noexcept { return queryParams_.totalcount; } + bool HaveRank() const noexcept { return queryParams_.flags & kResultsWithRank; } + bool NeedOutputRank() const noexcept { return queryParams_.flags & kResultsNeedOutputRank; } + const std::string& GetExplainResults() const& noexcept { return queryParams_.explainResults; } + const std::string& GetExplainResults() const&& = delete; + const std::vector& GetAggregationResults() const& noexcept { return queryParams_.aggResults; } + const std::vector& GetAggregationResults() const&& = delete; + Error Status() const noexcept { return status_; } h_vector GetNamespaces() const; bool IsCacheEnabled() const { return queryParams_.flags & kResultsWithItemID; } diff --git a/cpp_src/client/reindexer.cc b/cpp_src/client/reindexer.cc index e28041713..248b25cb0 100644 --- a/cpp_src/client/reindexer.cc +++ b/cpp_src/client/reindexer.cc @@ -29,7 +29,7 @@ Error Reindexer::Connect(const std::string& dsn, const client::ConnectOpts& opts Error Reindexer::Connect(const std::vector>& connectData) { return impl_->Connect(connectData); } -Error Reindexer::Stop() { return impl_->Stop(); } +void Reindexer::Stop() { impl_->Stop(); } Error Reindexer::AddNamespace(const NamespaceDef& nsDef) { return impl_->AddNamespace(nsDef, ctx_); } Error Reindexer::OpenNamespace(std::string_view nsName, const StorageOpts& storage) { return impl_->OpenNamespace(nsName, ctx_, storage); } Error Reindexer::DropNamespace(std::string_view nsName) { return impl_->DropNamespace(nsName, ctx_); } diff --git a/cpp_src/client/reindexer.h b/cpp_src/client/reindexer.h index 705de78e3..a8a3a8492 100644 --- a/cpp_src/client/reindexer.h +++ b/cpp_src/client/reindexer.h @@ -41,14 +41,15 @@ class Reindexer { Reindexer &operator=(Reindexer &&) noexcept; /// Connect - connect to reindexer server - /// @param dsn - uri of server and database, like: `cproto://user@password:127.0.0.1:6534/dbname` + /// @param dsn - uri of server and database, like: `cproto://user@password:127.0.0.1:6534/dbname` or + /// `ucproto://user@password:/tmp/reindexer.sock:/dbname` /// @param opts - Connect options. May contaion any of
Error Connect(const std::string &dsn, const client::ConnectOpts &opts = client::ConnectOpts()); /// Connect - connect to reindexer server /// @param connectData - list of server dsn + it's ConnectOpts Error Connect(const std::vector> &connectData); /// Stop - shutdown connector - Error Stop(); + void Stop(); /// Open or create namespace /// @param nsName - Name of namespace /// @param opts - Storage options. Can be one of
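Note on the hunk above: `client/reindexer.h` receives the same DSN documentation and the `void Stop()` signature as the coroutine client. For contrast with the event-loop sketch earlier, here is a minimal synchronous-client sketch that picks between the TCP and unix-socket schemes. The include path, the namespace name `items` and the default storage options on `OpenNamespace` are assumptions; the `Connect`/`OpenNamespace`/`Stop` signatures mirror the declarations in this diff, and the Windows restriction on `ucproto` matches the platform checks added below in `reindexer_tool.cc`.

```cpp
// Sketch only: reach the same database via TCP (cproto) or a unix socket (ucproto).
// ucproto is not available on Windows, so the TCP DSN is the portable default.
#include <iostream>
#include <string>
#include "client/reindexer.h"

int main(int argc, char* argv[]) {
	const std::string dsn = (argc > 1 && std::string(argv[1]) == "unix")
								? "ucproto://user@password:/tmp/reindexer.sock:/mydb"
								: "cproto://user@password:127.0.0.1:6534/mydb";

	reindexer::client::Reindexer rx;
	auto err = rx.Connect(dsn, reindexer::client::ConnectOpts().CreateDBIfMissing());
	if (err.ok()) {
		// default storage opts assumed to be Enabled().CreateIfMissing(), as in rpcclient.h
		err = rx.OpenNamespace("items");
	}
	if (!err.ok()) {
		std::cerr << "ERROR: " << err.what() << std::endl;
	}
	rx.Stop();	// shutdown connector; returns void after this change
	return err.ok() ? 0 : 1;
}
```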
diff --git a/cpp_src/client/rpcclient.cc b/cpp_src/client/rpcclient.cc index 29b6f5ccc..3c7d7089c 100644 --- a/cpp_src/client/rpcclient.cc +++ b/cpp_src/client/rpcclient.cc @@ -76,8 +76,8 @@ Error RPCClient::Connect(const std::vector vers; for (auto& ns : nsArray) { shared_lock lck(ns->lck_); diff --git a/cpp_src/client/rpcclient.h b/cpp_src/client/rpcclient.h index 238b16a12..eefb2cf03 100644 --- a/cpp_src/client/rpcclient.h +++ b/cpp_src/client/rpcclient.h @@ -41,7 +41,7 @@ class RPCClient { Error Connect(const std::string &dsn, const client::ConnectOpts &opts); Error Connect(const std::vector> &connectData); - Error Stop(); + void Stop(); Error OpenNamespace(std::string_view nsName, const InternalRdxContext &ctx, const StorageOpts &opts = StorageOpts().Enabled().CreateIfMissing()); diff --git a/cpp_src/client/rpcclientmock.cc b/cpp_src/client/rpcclientmock.cc index f2cb5635e..5c0d4b6ef 100644 --- a/cpp_src/client/rpcclientmock.cc +++ b/cpp_src/client/rpcclientmock.cc @@ -37,7 +37,7 @@ Error RPCClientMock::Delete(const Query& query, QueryResults& result, const Inte auto conn = getConn(); NsArray nsArray; - query.WalkNested(true, true, [this, &nsArray](const Query& q) { nsArray.push_back(getNamespace(q._namespace)); }); + query.WalkNested(true, true, [this, &nsArray](const Query& q) { nsArray.push_back(getNamespace(q.NsName())); }); result = QueryResults(conn, std::move(nsArray), nullptr, 0, config_.FetchAmount, config_.RequestTimeout); @@ -69,7 +69,7 @@ Error RPCClientMock::Update(const Query& query, QueryResults& result, const Inte auto conn = getConn(); NsArray nsArray; - query.WalkNested(true, true, [this, &nsArray](const Query& q) { nsArray.push_back(getNamespace(q._namespace)); }); + query.WalkNested(true, true, [this, &nsArray](const Query& q) { nsArray.push_back(getNamespace(q.NsName())); }); result = QueryResults(conn, std::move(nsArray), nullptr, 0, config_.FetchAmount, config_.RequestTimeout); @@ -308,7 +308,7 @@ Error RPCClientMock::selectImpl(const Query& query, QueryResults& result, cproto NsArray nsArray; query.Serialize(qser); - query.WalkNested(true, true, [this, &nsArray](const Query& q) { nsArray.push_back(getNamespace(q._namespace)); }); + query.WalkNested(true, true, [this, &nsArray](const Query& q) { nsArray.push_back(getNamespace(q.NsName())); }); h_vector vers; for (auto& ns : nsArray) { shared_lock lck(ns->lck_); diff --git a/cpp_src/client/synccororeindexer.h b/cpp_src/client/synccororeindexer.h index 79257bb86..24e74dc81 100644 --- a/cpp_src/client/synccororeindexer.h +++ b/cpp_src/client/synccororeindexer.h @@ -25,7 +25,8 @@ class SyncCoroReindexer { SyncCoroReindexer &operator=(SyncCoroReindexer &&rdx) noexcept; /// Connect - connect to reindexer server - /// @param dsn - uri of server and database, like: `cproto://user@password:127.0.0.1:6534/dbname` + /// @param dsn - uri of server and database, like: `cproto://user@password:127.0.0.1:6534/dbname` or + /// `ucproto://user@password:/tmp/reindexer.sock:/dbname` /// @param opts - Connect options. May contaion any of
Error Connect(const std::string &dsn, const client::ConnectOpts &opts = client::ConnectOpts()); /// Stop - shutdown connector diff --git a/cpp_src/client/synccororeindexerimpl.cc b/cpp_src/client/synccororeindexerimpl.cc index 1ca2b1d1b..2d005b291 100644 --- a/cpp_src/client/synccororeindexerimpl.cc +++ b/cpp_src/client/synccororeindexerimpl.cc @@ -108,7 +108,7 @@ Error SyncCoroReindexerImpl::GetMeta(std::string_view nsName, const std::string } Error SyncCoroReindexerImpl::PutMeta(std::string_view nsName, const std::string &key, std::string_view data, const InternalRdxContext &ctx) { - return sendCommand(DbCmdPutMeta, std::forward(nsName), key, data, ctx); + return sendCommand(DbCmdPutMeta, std::forward(nsName), key, std::forward(data), ctx); } Error SyncCoroReindexerImpl::EnumMeta(std::string_view nsName, std::vector &keys, const InternalRdxContext &ctx) { return sendCommand(DbCmdEnumMeta, std::forward(nsName), keys, ctx); diff --git a/cpp_src/cmd/reindexer_server/test/get_last_rx_version.py b/cpp_src/cmd/reindexer_server/test/get_last_rx_version.py new file mode 100644 index 000000000..8007b8201 --- /dev/null +++ b/cpp_src/cmd/reindexer_server/test/get_last_rx_version.py @@ -0,0 +1,28 @@ +import argparse +import re + +import requests +from packaging.version import parse + + +URL = "http://repo.restream.ru/itv-api-ng/7/x86_64/" + +parser = argparse.ArgumentParser(description='Version') +parser.add_argument('-v', '--version', default="3") +args = parser.parse_args() + +version = args.version +if version == "3": + name = ">reindexer-server-" +elif version == "4": + name = ">reindexer-4-server-" +else: + raise ValueError(f"Version {version} is invalid") + +r = requests.get(URL) +res = r.text +res_list = re.findall(f'{name}.*.rpm', res) +versions_list = [(i[1:], parse(i[len(name):-11])) for i in res_list] +versions_list.sort(key=lambda x: x[1]) + +print(versions_list[-1][0]) diff --git a/cpp_src/cmd/reindexer_server/test/test_storage_compatibility.sh b/cpp_src/cmd/reindexer_server/test/test_storage_compatibility.sh new file mode 100755 index 000000000..d189d3841 --- /dev/null +++ b/cpp_src/cmd/reindexer_server/test/test_storage_compatibility.sh @@ -0,0 +1,195 @@ +#!/bin/bash +# Task: https://github.com/restream/reindexer/-/issues/1188 +set -e + +function KillAndRemoveServer { + local pid=$1 + kill $pid + wait $pid + yum remove -y 'reindexer*' > /dev/null +} + +function WaitForDB { + # wait until DB is loaded + set +e # disable "exit on error" so the script won't stop when DB's not loaded yet + is_connected=$(reindexer_tool --dsn $ADDRESS --command '\databases list'); + while [[ $is_connected != "test" ]] + do + sleep 2 + is_connected=$(reindexer_tool --dsn $ADDRESS --command '\databases list'); + done + set -e +} + +function CompareNamespacesLists { + local ns_list_actual=$1 + local ns_list_expected=$2 + local pid=$3 + + diff=$(echo ${ns_list_actual[@]} ${ns_list_expected[@]} | tr ' ' '\n' | sort | uniq -u) # compare in any order + if [ "$diff" == "" ]; then + echo "## PASS: namespaces list not changed" + else + echo "##### FAIL: namespaces list was changed" + echo "expected: $ns_list_expected" + echo "actual: $ns_list_actual" + KillAndRemoveServer $pid; + exit 1 + fi +} + +function CompareMemstats { + local actual=$1 + local expected=$2 + local pid=$3 + diff=$(echo ${actual[@]} ${expected[@]} | tr ' ' '\n' | sed 's/\(.*\),$/\1/' | sort | uniq -u) # compare in any order + if [ "$diff" == "" ]; then + echo "## PASS: memstats not changed" + else + echo "##### FAIL: memstats was changed" + echo 
"expected: $expected" + echo "actual: $actual" + KillAndRemoveServer $pid; + exit 1 + fi +} + + +RX_SERVER_CURRENT_VERSION_RPM="$(basename build/reindexer-*server*.rpm)" +VERSION_FROM_RPM=$(echo "$RX_SERVER_CURRENT_VERSION_RPM" | grep -o '.*server-..') +VERSION=$(echo ${VERSION_FROM_RPM: -2:1}) # one-digit version + +echo "## choose latest release rpm file" +if [ $VERSION == 3 ]; then + LATEST_RELEASE=$(python3 cpp_src/cmd/reindexer_server/test/get_last_rx_version.py -v 3) + namespaces_list_expected=$'purchase_options_ext_dict\nchild_account_recommendations\n#config\n#activitystats\nradio_channels\ncollections\n#namespaces\nwp_imports_tasks\nepg_genres\nrecom_media_items_personal\nrecom_epg_archive_default\n#perfstats\nrecom_epg_live_default\nmedia_view_templates\nasset_video_servers\nwp_tasks_schedule\nadmin_roles\n#clientsstats\nrecom_epg_archive_personal\nrecom_media_items_similars\nmenu_items\naccount_recommendations\nkaraoke_items\nmedia_items\nbanners\n#queriesperfstats\nrecom_media_items_default\nrecom_epg_live_personal\nservices\n#memstats\nchannels\nmedia_item_recommendations\nwp_tasks_tasks\nepg' +elif [ $VERSION == 4 ]; then + LATEST_RELEASE=$(python3 cpp_src/cmd/reindexer_server/test/get_last_rx_version.py -v 4) + # replicationstats ns added for v4 + namespaces_list_expected=$'purchase_options_ext_dict\nchild_account_recommendations\n#config\n#activitystats\n#replicationstats\nradio_channels\ncollections\n#namespaces\nwp_imports_tasks\nepg_genres\nrecom_media_items_personal\nrecom_epg_archive_default\n#perfstats\nrecom_epg_live_default\nmedia_view_templates\nasset_video_servers\nwp_tasks_schedule\nadmin_roles\n#clientsstats\nrecom_epg_archive_personal\nrecom_media_items_similars\nmenu_items\naccount_recommendations\nkaraoke_items\nmedia_items\nbanners\n#queriesperfstats\nrecom_media_items_default\nrecom_epg_live_personal\nservices\n#memstats\nchannels\nmedia_item_recommendations\nwp_tasks_tasks\nepg' +else + echo "Unknown version" + exit 1 +fi + +echo "## downloading latest release rpm file: $LATEST_RELEASE" +curl "http://repo.itv.restr.im/itv-api-ng/7/x86_64/$LATEST_RELEASE" --output $LATEST_RELEASE; +echo "## downloading example DB" +curl "https://git.restream.ru/MaksimKravchuk/reindexer_testdata/-/raw/master/big.zip" --output big.zip; +unzip -o big.zip # unzips into mydb_big.rxdump; + +ADDRESS="cproto://127.0.0.1:6534/" +DB_NAME="test" + +memstats_expected=$'[ +{"replication":{"data_hash":24651210926,"data_count":3}}, +{"replication":{"data_hash":6252344969,"data_count":1}}, +{"replication":{"data_hash":37734732881,"data_count":28}}, +{"replication":{"data_hash":0,"data_count":0}}, +{"replication":{"data_hash":1024095024522,"data_count":1145}}, +{"replication":{"data_hash":8373644068,"data_count":1315}}, +{"replication":{"data_hash":0,"data_count":0}}, +{"replication":{"data_hash":0,"data_count":0}}, +{"replication":{"data_hash":0,"data_count":0}}, +{"replication":{"data_hash":0,"data_count":0}}, +{"replication":{"data_hash":7404222244,"data_count":97}}, +{"replication":{"data_hash":94132837196,"data_count":4}}, +{"replication":{"data_hash":1896088071,"data_count":2}}, +{"replication":{"data_hash":0,"data_count":0}}, +{"replication":{"data_hash":-672103903,"data_count":33538}}, +{"replication":{"data_hash":0,"data_count":0}}, +{"replication":{"data_hash":6833710705,"data_count":1}}, +{"replication":{"data_hash":5858155773472,"data_count":4500}}, +{"replication":{"data_hash":-473221280268823592,"data_count":65448}}, +{"replication":{"data_hash":0,"data_count":0}}, 
+{"replication":{"data_hash":8288213744,"data_count":3}}, +{"replication":{"data_hash":0,"data_count":0}}, +{"replication":{"data_hash":0,"data_count":0}}, +{"replication":{"data_hash":354171024786967,"data_count":3941}}, +{"replication":{"data_hash":-6520334670,"data_count":35886}}, +{"replication":{"data_hash":112772074632,"data_count":281}}, +{"replication":{"data_hash":-12679568198538,"data_count":1623116}} +] +Returned 27 rows' + +echo "##### Forward compatibility test #####" + +DB_PATH=$(pwd)"/rx_db" + +echo "Database: "$DB_PATH + +echo "## installing latest release: $LATEST_RELEASE" +yum install -y $LATEST_RELEASE > /dev/null; +# run RX server with disabled logging +reindexer_server -l warning --httplog=none --rpclog=none --db $DB_PATH & +server_pid=$! +sleep 2; + +reindexer_tool --dsn $ADDRESS$DB_NAME -f mydb_big.rxdump --createdb; +sleep 1; + +namespaces_1=$(reindexer_tool --dsn $ADDRESS$DB_NAME --command '\namespaces list'); +echo $namespaces_1; +CompareNamespacesLists "${namespaces_1[@]}" "${namespaces_list_expected[@]}" $server_pid; + +memstats_1=$(reindexer_tool --dsn $ADDRESS$DB_NAME --command 'select replication.data_hash, replication.data_count from #memstats'); +CompareMemstats "${memstats_1[@]}" "${memstats_expected[@]}" $server_pid; + +KillAndRemoveServer $server_pid; + +echo "## installing current version: $RX_SERVER_CURRENT_VERSION_RPM" +yum install -y build/*.rpm > /dev/null; +reindexer_server -l0 --corelog=none --httplog=none --rpclog=none --db $DB_PATH & +server_pid=$! +sleep 2; + +WaitForDB + +namespaces_2=$(reindexer_tool --dsn $ADDRESS$DB_NAME --command '\namespaces list'); +echo $namespaces_2; +CompareNamespacesLists "${namespaces_2[@]}" "${namespaces_1[@]}" $server_pid; + +memstats_2=$(reindexer_tool --dsn $ADDRESS$DB_NAME --command 'select replication.data_hash, replication.data_count from #memstats'); +CompareMemstats "${memstats_2[@]}" "${memstats_1[@]}" $server_pid; + +KillAndRemoveServer $server_pid; +rm -rf $DB_PATH; +sleep 1; + +echo "##### Backward compatibility test #####" + +echo "## installing current version: $RX_SERVER_CURRENT_VERSION_RPM" +yum install -y build/*.rpm > /dev/null; +reindexer_server -l warning --httplog=none --rpclog=none --db $DB_PATH & +server_pid=$! +sleep 2; + +reindexer_tool --dsn $ADDRESS$DB_NAME -f mydb_big.rxdump --createdb; +sleep 1; + +namespaces_3=$(reindexer_tool --dsn $ADDRESS$DB_NAME --command '\namespaces list'); +echo $namespaces_3; +CompareNamespacesLists "${namespaces_3[@]}" "${namespaces_list_expected[@]}" $server_pid; + +memstats_3=$(reindexer_tool --dsn $ADDRESS$DB_NAME --command 'select replication.data_hash, replication.data_count from #memstats'); +CompareMemstats "${memstats_3[@]}" "${memstats_expected[@]}" $server_pid; + +KillAndRemoveServer $server_pid; + +echo "## installing latest release: $LATEST_RELEASE" +yum install -y $LATEST_RELEASE > /dev/null; +reindexer_server -l warning --httplog=none --rpclog=none --db $DB_PATH & +server_pid=$! 
+sleep 2; + +WaitForDB + +namespaces_4=$(reindexer_tool --dsn $ADDRESS$DB_NAME --command '\namespaces list'); +echo $namespaces_4; +CompareNamespacesLists "${namespaces_4[@]}" "${namespaces_3[@]}" $server_pid; + +memstats_4=$(reindexer_tool --dsn $ADDRESS$DB_NAME --command 'select replication.data_hash, replication.data_count from #memstats'); +CompareMemstats "${memstats_4[@]}" "${memstats_3[@]}" $server_pid; + +KillAndRemoveServer $server_pid; +rm -rf $DB_PATH; diff --git a/cpp_src/cmd/reindexer_tool/commandsexecutor.cc b/cpp_src/cmd/reindexer_tool/commandsexecutor.cc index 8509155f5..77c3abb70 100644 --- a/cpp_src/cmd/reindexer_tool/commandsexecutor.cc +++ b/cpp_src/cmd/reindexer_tool/commandsexecutor.cc @@ -47,19 +47,18 @@ Error CommandsExecutor::Run(const std::string& } template -void CommandsExecutor::GetSuggestions(const std::string& input, std::vector& suggestions) { +Error CommandsExecutor::GetSuggestions(const std::string& input, std::vector& suggestions) { OutParamCommand> cmd( - [this, &input](std::vector& suggestions) { - getSuggestions(input, suggestions); - return errOK; - }, - suggestions); - execCommand(cmd); + [this, &input](std::vector& suggestions) { return getSuggestions(input, suggestions); }, suggestions); + return execCommand(cmd); } template Error CommandsExecutor::Stop() { - GenericCommand cmd([this] { return stop(true); }); + GenericCommand cmd([this] { + stop(true); + return Error{}; + }); auto err = execCommand(cmd); if (err.ok() && executorThr_.joinable()) { executorThr_.join(); @@ -277,11 +276,26 @@ Error CommandsExecutor::runImpl(const std::string& dsn, Args&&... a template std::string CommandsExecutor::getCurrentDsn(bool withPath) const { + using namespace std::string_view_literals; std::string dsn(uri_.scheme() + "://"); if (!uri_.password().empty() && !uri_.username().empty()) { - dsn += uri_.username() + ":" + uri_.password() + "@"; + dsn += uri_.username() + ':' + uri_.password() + '@'; + } + if (uri_.scheme() == "ucproto"sv) { + std::vector pathParts; + reindexer::split(std::string_view(uri_.path()), ":", true, pathParts); + std::string_view dbName; + if (pathParts.size() >= 2) { + dbName = pathParts.back(); + } + if (dbName.size()) { + dsn += uri_.path().substr(0, uri_.path().size() - dbName.size() - 1) + ':' + (withPath ? uri_.path() : "/"); + } else { + dsn += uri_.path() + ':' + (withPath ? uri_.path() : "/"); + } + } else { + dsn += uri_.hostname() + ':' + uri_.port() + (withPath ? uri_.path() : "/"); } - dsn += uri_.hostname() + ":" + uri_.port() + (withPath ? 
uri_.path() : "/"); return dsn; } @@ -475,15 +489,14 @@ Error CommandsExecutor::processImpl(const std::string& command) noe } template <> -Error CommandsExecutor::stop(bool terminate) { +void CommandsExecutor::stop(bool terminate) { if (terminate) { stopCh_.close(); } - return Error(); } template <> -Error CommandsExecutor::stop(bool terminate) { +void CommandsExecutor::stop(bool terminate) { if (terminate) { stopCh_.close(); } @@ -491,11 +504,17 @@ Error CommandsExecutor::stop(bool terminate) { } template -void CommandsExecutor::getSuggestions(const std::string& input, std::vector& suggestions) { - if (!input.empty() && input[0] != '\\') db().GetSqlSuggestions(input, input.length() - 1, suggestions); +Error CommandsExecutor::getSuggestions(const std::string& input, std::vector& suggestions) { + if (!input.empty() && input[0] != '\\') { + auto err = db().GetSqlSuggestions(input, input.length() - 1, suggestions); + if (!err.ok()) { + return err; + } + } if (suggestions.empty()) { addCommandsSuggestions(input, suggestions); } + return {}; } template @@ -761,7 +780,8 @@ Error CommandsExecutor::commandDump(const std::string& command) { return Error(errCanceled, "Canceled"); } wrser << "\\UPSERT " << reindexer::escapeString(nsDef.name) << ' '; - it.GetJSON(wrser, false); + err = it.GetJSON(wrser, false); + if (!err.ok()) return err; wrser << '\n'; if (wrser.Len() > 0x100000) { output_() << wrser.Slice(); @@ -847,9 +867,11 @@ Error CommandsExecutor::commandMeta(const std::string& command) { auto nsName = reindexer::unescapeString(parser.NextToken()); std::vector allMeta; auto err = db().EnumMeta(nsName, allMeta); + if (!err.ok()) return err; for (auto& metaKey : allMeta) { std::string metaData; - db().GetMeta(nsName, metaKey, metaData); + err = db().GetMeta(nsName, metaKey, metaData); + if (!err.ok()) return err; output_() << metaKey << " = " << metaData << std::endl; } return err; @@ -917,15 +939,18 @@ Error CommandsExecutor::commandBench(const std::string& command) { LineParser parser(command); parser.NextToken(); - int benchTime = reindexer::stoi(parser.NextToken()); - if (benchTime == 0) benchTime = kBenchDefaultTime; + const std::string_view benchTimeToken = parser.NextToken(); + const int benchTime = benchTimeToken.empty() ? kBenchDefaultTime : reindexer::stoi(benchTimeToken); - db().DropNamespace(kBenchNamespace); + auto err = db().DropNamespace(kBenchNamespace); + if (!err.ok() && err.code() != errNotFound) { + return err; + } NamespaceDef nsDef(kBenchNamespace); nsDef.AddIndex("id", "hash", "int", IndexOpts().PK()); - auto err = db().AddNamespace(nsDef); + err = db().AddNamespace(nsDef); if (!err.ok()) return err; output_() << "Seeding " << kBenchItemsCount << " documents to bench namespace..." 
<< std::endl; @@ -1002,31 +1027,30 @@ Error CommandsExecutor::commandSubscribe(const std::string& command template <> Error CommandsExecutor::commandProcessDatabases(const std::string& command) { + using namespace std::string_view_literals; LineParser parser(command); parser.NextToken(); std::string_view subCommand = parser.NextToken(); - assertrx(uri_.scheme() == "cproto"); - if (subCommand == "list") { + assertrx(uri_.scheme() == "cproto"sv || uri_.scheme() == "ucproto"sv); + if (subCommand == "list"sv) { std::vector dbList; Error err = getAvailableDatabases(dbList); if (!err.ok()) return err; for (const std::string& dbName : dbList) output_() << dbName << std::endl; return Error(); - } else if (subCommand == "use") { + } else if (subCommand == "use"sv) { std::string currentDsn = getCurrentDsn() + std::string(parser.NextToken()); - Error err = stop(false); - if (!err.ok()) return err; - err = db().Connect(currentDsn, loop_); + stop(false); + auto err = db().Connect(currentDsn, loop_); if (err.ok()) err = db().Status(); if (err.ok()) output_() << "Succesfully connected to " << currentDsn << std::endl; return err; - } else if (subCommand == "create") { + } else if (subCommand == "create"sv) { auto dbName = parser.NextToken(); std::string currentDsn = getCurrentDsn() + std::string(dbName); - Error err = stop(false); - if (!err.ok()) return err; + stop(false); output_() << "Creating database '" << dbName << "'" << std::endl; - err = db().Connect(currentDsn, loop_, reindexer::client::ConnectOpts().CreateDBIfMissing()); + auto err = db().Connect(currentDsn, loop_, reindexer::client::ConnectOpts().CreateDBIfMissing()); if (!err.ok()) { std::cerr << "Error on database '" << dbName << "' creation" << std::endl; return err; @@ -1145,11 +1169,14 @@ std::function CommandsExecutor Error Run(const std::string& dsn, const Args&... 
args); - void GetSuggestions(const std::string& input, std::vector& suggestions); + Error GetSuggestions(const std::string& input, std::vector& suggestions); Error Stop(); Error Process(const std::string& command); Error FromFile(std::istream& in); @@ -78,8 +78,8 @@ class CommandsExecutor : public reindexer::IUpdatesObserver { std::vector& suggestions); Error processImpl(const std::string& command) noexcept; - Error stop(bool terminate); - void getSuggestions(const std::string& input, std::vector& suggestions); + void stop(bool terminate); + Error getSuggestions(const std::string& input, std::vector& suggestions); Error commandSelect(const std::string& command); Error commandUpsert(const std::string& command); Error commandUpdateSQL(const std::string& command); diff --git a/cpp_src/cmd/reindexer_tool/commandsprocessor.cc b/cpp_src/cmd/reindexer_tool/commandsprocessor.cc index 17ce39931..f084ea387 100644 --- a/cpp_src/cmd/reindexer_tool/commandsprocessor.cc +++ b/cpp_src/cmd/reindexer_tool/commandsprocessor.cc @@ -3,8 +3,8 @@ #include #include "client/cororeindexer.h" #include "core/reindexer.h" -#include "tableviewscroller.h" #include "tools/fsops.h" +#include "tools/terminalutils.h" namespace reindexer_tool { @@ -30,9 +30,13 @@ template void CommandsProcessor::setCompletionCallback(T& rx, void (T::*set_completion_callback)(new_v_callback_t const&)) { (rx.*set_completion_callback)([this](std::string const& input, int) -> replxx::Replxx::completions_t { std::vector completions; - executor_.GetSuggestions(input, completions); + const auto err = executor_.GetSuggestions(input, completions); replxx::Replxx::completions_t result; - for (const std::string& suggestion : completions) result.emplace_back(suggestion); + if (err.ok()) { + for (const std::string& suggestion : completions) { + result.emplace_back(suggestion); + } + } return result; }); } @@ -43,7 +47,8 @@ void CommandsProcessor::setCompletionCallback(T& rx, void (T::*set_ (rx.*set_completion_callback)( [this](std::string const& input, int, void*) -> replxx::Replxx::completions_t { std::vector completions; - executor_.GetSuggestions(input, completions); + const auto err = executor_.GetSuggestions(input, completions); + if (!err.ok()) return {}; return completions; }, nullptr); diff --git a/cpp_src/cmd/reindexer_tool/readme.md b/cpp_src/cmd/reindexer_tool/readme.md index e7a71e6c2..19be92a3a 100644 --- a/cpp_src/cmd/reindexer_tool/readme.md +++ b/cpp_src/cmd/reindexer_tool/readme.md @@ -21,7 +21,7 @@ Reindexer command line tool is an client utility to work with database. 
reindexer_tool {OPTIONS} Options - -d[DSN], --dsn=[DSN] DSN to 'reindexer', like 'cproto://127.0.0.1:6534/dbname' or 'builtin:///var/lib/reindexer/dbname' + -d[DSN], --dsn=[DSN] DSN to 'reindexer', like 'cproto://127.0.0.1:6534/dbname', 'builtin:///var/lib/reindexer/dbname' or `ucproto://user@password:/tmp/reindexer.sock:/dbname` -f[FILENAME], --filename=[FILENAME] execute commands from file, then exit -c[COMMAND], --command=[COMMAND] run only single command (SQL or internal) and exit -o[FILENAME], --output=[FILENAME] send query results to file diff --git a/cpp_src/cmd/reindexer_tool/reindexer_tool.cc b/cpp_src/cmd/reindexer_tool/reindexer_tool.cc index cb5be7eb0..fdf8ff498 100644 --- a/cpp_src/cmd/reindexer_tool/reindexer_tool.cc +++ b/cpp_src/cmd/reindexer_tool/reindexer_tool.cc @@ -56,9 +56,16 @@ int main(int argc, char* argv[]) { args::HelpFlag help(parser, "help", "show this message", {'h', "help"}); args::Group progOptions("options"); +#ifdef _WIN32 args::ValueFlag dbDsn(progOptions, "DSN", "DSN to 'reindexer'. Can be 'cproto://:/' or 'builtin://'", {'d', "dsn"}, "", Options::Single | Options::Global); +#else // _WIN32 + args::ValueFlag dbDsn( + progOptions, "DSN", + "DSN to 'reindexer'. Can be 'cproto://:/', 'builtin://' or 'ucproto://:/'", + {'d', "dsn"}, "", Options::Single | Options::Global); +#endif // _WIN32 args::ValueFlag fileName(progOptions, "FILENAME", "execute commands from file, then exit", {'f', "filename"}, "", Options::Single | Options::Global); args::ValueFlag command(progOptions, "COMMAND", "run only single command (SQL or internal) and exit'", {'c', "command"}, @@ -104,6 +111,8 @@ int main(int argc, char* argv[]) { signal(SIGPIPE, SIG_IGN); #endif + using namespace std::string_view_literals; + using reindexer::checkIfStartsWithCS; std::string db; if (dsn.empty()) { db = args::get(dbName); @@ -111,7 +120,15 @@ int main(int argc, char* argv[]) { std::cerr << "Error: --dsn either database name should be set as a first argument" << std::endl; return 2; } - if (db.substr(0, 9) == "cproto://" || db.substr(0, 10) == "builtin://") { + + if (checkIfStartsWithCS("cproto://"sv, db) || checkIfStartsWithCS("ucproto://"sv, db) || checkIfStartsWithCS("builtin://"sv, db)) { +#ifdef _WIN32 + if (checkIfStartsWithCS("ucproto://"sv, db) == 0) { + std::cerr << "Invalid DSN: ucproto:// is not supported on the Windows platform. Use cproto:// or builtin:// instead" + << std::endl; + return 2; + } +#endif // _WIN32 dsn = db; } else { dsn = "cproto://reindexer:reindexer@127.0.0.1:6534/" + db; @@ -131,7 +148,14 @@ int main(int argc, char* argv[]) { std::cout << "Reindexer command line tool version " << REINDEX_VERSION << std::endl; } - if (dsn.compare(0, 9, "cproto://") == 0) { + if (checkIfStartsWithCS("cproto://"sv, dsn) || checkIfStartsWithCS("ucproto://"sv, dsn)) { +#ifdef _WIN32 + if (checkIfStartsWithCS("ucproto://"sv, dsn)) { + std::cerr << "Invalid DSN: ucproto:// is not supported on the Windows platform. 
Use cproto:// or builtin:// instead" + << std::endl; + return 2; + } +#endif // _WIN32 reindexer::client::ReindexerConfig config; config.EnableCompression = true; config.AppName = args::get(appName); @@ -139,13 +163,17 @@ int main(int argc, char* argv[]) { args::get(connThreads), config); err = commandsProcessor.Connect(dsn, reindexer::client::ConnectOpts().CreateDBIfMissing(createDBF && args::get(createDBF))); if (err.ok()) ok = commandsProcessor.Run(args::get(command)); - } else if (dsn.compare(0, 10, "builtin://") == 0) { + } else if (checkIfStartsWithCS("builtin://"sv, dsn)) { reindexer::Reindexer db; CommandsProcessor commandsProcessor(args::get(outFileName), args::get(fileName), args::get(connThreads)); err = commandsProcessor.Connect(dsn, ConnectOpts().DisableReplication()); if (err.ok()) ok = commandsProcessor.Run(args::get(command)); } else { +#ifdef _WIN32 std::cerr << "Invalid DSN format: " << dsn << " Must begin from cproto:// or builtin://" << std::endl; +#else // _WIN32 + std::cerr << "Invalid DSN format: " << dsn << " Must begin from cproto://, ucproto:// or builtin://" << std::endl; +#endif // _WIN32 } if (!err.ok()) { std::cerr << "ERROR: " << err.what() << std::endl; diff --git a/cpp_src/core/cjson/cjsondecoder.cc b/cpp_src/core/cjson/cjsondecoder.cc index b74b2bcba..9b6b84930 100644 --- a/cpp_src/core/cjson/cjsondecoder.cc +++ b/cpp_src/core/cjson/cjsondecoder.cc @@ -7,71 +7,57 @@ namespace reindexer { -bool CJsonDecoder::decodeCJson(Payload &pl, Serializer &rdser, WrSerializer &wrser, bool match) { +template +bool CJsonDecoder::decodeCJson(Payload &pl, Serializer &rdser, WrSerializer &wrser, FilterT filter, RecoderT recoder, TagOptT) { const ctag tag = rdser.GetCTag(); TagType tagType = tag.Type(); if (tagType == TAG_END) { wrser.PutCTag(kCTagEnd); return false; } - const int tagName = tag.Name(); - if (tagName) { - // Check + int tagName = 0; + if constexpr (std::is_same_v) { + tagName = tag.Name(); + assertrx_dbg(tagName); + // Check if tag exists (void)tagsMatcher_.tag2name(tagName); tagsPath_.emplace_back(tagName); } + if rx_unlikely (tag.Field() >= 0) { - throw Error(errLogic, "Reference tag was found in transport CJSON for field %d[%s] in ns [%s]", tag.Field(), - tagsMatcher_.tag2name(tagName), pl.Type().Name()); + throwTagReferenceError(tag, pl); } const int field = tagsMatcher_.tags2field(tagsPath_.data(), tagsPath_.size()); - - if (filter_) { - if (field >= 0) { - match = filter_->contains(field); - } else { - match = match && filter_->match(tagsPath_); - } - } - Recoder *recoder{nullptr}; - if (recoder_) { - if (field >= 0) { - if (recoder_->Match(field)) { - recoder = recoder_; - } - } else { - if (recoder_->Match(tagsPath_)) { - recoder = recoder_; - } - } - } - if (recoder) { - tagType = recoder->Type(tagType); - } if (field >= 0) { + const bool match = filter.contains(field); if (match) { + tagType = recoder.RegisterTagType(tagType, field); if (tagType == TAG_NULL) { - objectScalarIndexes_.set(field); wrser.PutCTag(ctag{TAG_NULL, tagName}); - } else if (recoder) { - recoder->Recode(rdser, pl, tagName, wrser); + } else if (recoder.Recode(rdser, pl, tagName, wrser)) { + // No more actions needed after recoding } else { const auto &fieldRef{pl.Type().Field(field)}; const KeyValueType fieldType{fieldRef.Type()}; if (tagType == TAG_ARRAY) { if rx_unlikely (!fieldRef.IsArray()) { - throw Error(errLogic, "Error parsing cjson field '%s' - got array, expected scalar %s", fieldRef.Name(), - fieldType.Name()); + throwUnexpectedArrayError(fieldRef); } const carraytag 
atag = rdser.GetCArrayTag(); const auto count = atag.Count(); const int ofs = pl.ResizeArray(field, count, true); const TagType atagType = atag.Type(); - for (size_t i = 0; i < count; ++i) { - const TagType type = atagType != TAG_OBJECT ? atagType : rdser.GetCTag().Type(); - pl.Set(field, ofs + i, cjsonValueToVariant(type, rdser, fieldType)); + if (atagType != TAG_OBJECT) { + for (size_t i = 0; i < count; ++i) { + pl.Set(field, ofs + i, cjsonValueToVariant(atagType, rdser, fieldType)); + } + } else { + for (size_t i = 0; i < count; ++i) { + pl.Set(field, ofs + i, cjsonValueToVariant(rdser.GetCTag().Type(), rdser, fieldType)); + } } + wrser.PutCTag(ctag{TAG_ARRAY, tagName, field}); wrser.PutVarUint(count); } else { @@ -93,44 +79,67 @@ bool CJsonDecoder::decodeCJson(Payload &pl, Serializer &rdser, WrSerializer &wrs skipCjsonTag(tag, rdser); } } else { - wrser.PutCTag(ctag{tagType, tagName, field}); - if (tagType == TAG_OBJECT) { - while (decodeCJson(pl, rdser, wrser, match)) - ; - } else if (!match) { - skipCjsonTag(tag, rdser); - } else if (recoder) { - recoder->Recode(rdser, wrser); - } else if (tagType == TAG_ARRAY) { - const carraytag atag = rdser.GetCArrayTag(); - wrser.PutCArrayTag(atag); - const auto count = atag.Count(); - const TagType atagType = atag.Type(); - CounterGuardIR32 g(arrayLevel_); - for (size_t i = 0; i < count; ++i) { - switch (atagType) { - case TAG_OBJECT: - decodeCJson(pl, rdser, wrser, match); - break; - case TAG_VARINT: - case TAG_NULL: - case TAG_BOOL: - case TAG_STRING: - case TAG_END: - case TAG_DOUBLE: - case TAG_ARRAY: - case TAG_UUID: + const bool match = filter.match(tagsPath_); + if (match) { + tagType = recoder.RegisterTagType(tagType, tagsPath_); + wrser.PutCTag(ctag{tagType, tagName, field}); + if (tagType == TAG_OBJECT) { + while (decodeCJson(pl, rdser, wrser, filter.MakeCleanCopy(), recoder.MakeCleanCopy(), NamedTagOpt{})) + ; + } else if (recoder.Recode(rdser, wrser)) { + // No more actions needed after recoding + } else if (tagType == TAG_ARRAY) { + const carraytag atag = rdser.GetCArrayTag(); + wrser.PutCArrayTag(atag); + const auto count = atag.Count(); + const TagType atagType = atag.Type(); + CounterGuardIR32 g(arrayLevel_); + if (atagType == TAG_OBJECT) { + for (size_t i = 0; i < count; ++i) { + decodeCJson(pl, rdser, wrser, filter.MakeCleanCopy(), recoder.MakeCleanCopy(), NamelessTagOpt{}); + } + } else { + for (size_t i = 0; i < count; ++i) { copyCJsonValue(atagType, rdser, wrser); - break; + } } + } else { + copyCJsonValue(tagType, rdser, wrser); } + } else if (tagType != TAG_OBJECT) { + // !match + skipCjsonTag(tag, rdser); } else { - copyCJsonValue(tagType, rdser, wrser); + // !match + wrser.PutCTag(ctag{tagType, tagName, field}); + while (decodeCJson(pl, rdser, wrser, filter.MakeSkipFilter(), recoder.MakeCleanCopy(), NamedTagOpt{})) + ; } } - if (tagName) tagsPath_.pop_back(); + if constexpr (std::is_same_v) { + tagsPath_.pop_back(); + } + return true; } +RX_NO_INLINE void CJsonDecoder::throwTagReferenceError(ctag tag, const Payload &pl) { + throw Error(errLogic, "Reference tag was found in transport CJSON for field %d[%s] in ns [%s]", tag.Field(), + tagsMatcher_.tag2name(tag.Name()), pl.Type().Name()); +} + +RX_NO_INLINE void CJsonDecoder::throwUnexpectedArrayError(const PayloadFieldType &fieldRef) { + throw Error(errLogic, "Error parsing cjson field '%s' - got array, expected scalar %s", fieldRef.Name(), fieldRef.Type().Name()); +} + +template bool CJsonDecoder::decodeCJson( + Payload &, Serializer &, WrSerializer &, 
CJsonDecoder::DummyFilter, CJsonDecoder::DummyRecoder, CJsonDecoder::NamelessTagOpt); +template bool CJsonDecoder::decodeCJson( + Payload &, Serializer &, WrSerializer &, CJsonDecoder::DummyFilter, CJsonDecoder::DefaultRecoder, CJsonDecoder::NamelessTagOpt); +template bool CJsonDecoder::decodeCJson( + Payload &, Serializer &, WrSerializer &, CJsonDecoder::RestrictingFilter, CJsonDecoder::DummyRecoder, CJsonDecoder::NamelessTagOpt); +template bool CJsonDecoder::decodeCJson( + Payload &, Serializer &, WrSerializer &, CJsonDecoder::RestrictingFilter, CJsonDecoder::DefaultRecoder, CJsonDecoder::NamelessTagOpt); + } // namespace reindexer diff --git a/cpp_src/core/cjson/cjsondecoder.h b/cpp_src/core/cjson/cjsondecoder.h index a128fe487..abd4facbf 100644 --- a/cpp_src/core/cjson/cjsondecoder.h +++ b/cpp_src/core/cjson/cjsondecoder.h @@ -20,25 +20,153 @@ class Recoder { class CJsonDecoder { public: - CJsonDecoder(TagsMatcher &tagsMatcher) noexcept : tagsMatcher_(tagsMatcher), filter_(nullptr) {} - CJsonDecoder(TagsMatcher &tagsMatcher, const FieldsSet *filter, Recoder *recoder) noexcept - : tagsMatcher_(tagsMatcher), filter_(filter), recoder_(recoder) {} + explicit CJsonDecoder(TagsMatcher &tagsMatcher) noexcept : tagsMatcher_(tagsMatcher) {} - void Decode(Payload &pl, Serializer &rdSer, WrSerializer &wrSer) { + class SkipFilter { + public: + SkipFilter MakeCleanCopy() const noexcept { return SkipFilter(); } + SkipFilter MakeSkipFilter() const noexcept { return SkipFilter(); } + + RX_ALWAYS_INLINE bool contains([[maybe_unused]] int field) const noexcept { return false; } + RX_ALWAYS_INLINE bool match(const TagsPath &) const noexcept { return false; } + }; + + class DummyFilter { + public: + DummyFilter MakeCleanCopy() const noexcept { return DummyFilter(); } + SkipFilter MakeSkipFilter() const noexcept { return SkipFilter(); } + RX_ALWAYS_INLINE bool HasArraysFields(const PayloadTypeImpl &) const noexcept { return false; } + + RX_ALWAYS_INLINE bool contains([[maybe_unused]] int field) const noexcept { return true; } + RX_ALWAYS_INLINE bool match(const TagsPath &) const noexcept { return true; } + }; + + class IndexedSkipFilter { + public: + IndexedSkipFilter(const FieldsSet &f) noexcept : f_(&f) {} + IndexedSkipFilter MakeCleanCopy() const noexcept { return IndexedSkipFilter(*f_); } + IndexedSkipFilter MakeSkipFilter() const noexcept { return IndexedSkipFilter(*f_); } + + RX_ALWAYS_INLINE bool contains(int field) const noexcept { return f_->contains(field); } + RX_ALWAYS_INLINE bool match(const TagsPath &) const noexcept { return false; } + + private: + const FieldsSet *f_; + }; + + class RestrictingFilter { + public: + RestrictingFilter(const FieldsSet &f) noexcept : f_(&f), match_(true) {} + + RestrictingFilter MakeCleanCopy() const noexcept { return RestrictingFilter(*f_); } + IndexedSkipFilter MakeSkipFilter() const noexcept { return IndexedSkipFilter(*f_); } + RX_ALWAYS_INLINE bool HasArraysFields(const PayloadTypeImpl &pt) const noexcept { + for (auto f : *f_) { + if (f >= 0 && pt.Field(f).IsArray()) { + return true; + } + } + return false; + } + + RX_ALWAYS_INLINE bool contains(int field) noexcept { + match_ = f_->contains(field); + return match_; + } + RX_ALWAYS_INLINE bool match(const TagsPath &tagsPath) noexcept { + match_ = match_ && f_->getTagsPathsLength() && f_->match(tagsPath); + return match_; + } + + private: + const FieldsSet *f_; + bool match_; + }; + + class DummyRecoder { + public: + RX_ALWAYS_INLINE DummyRecoder MakeCleanCopy() const noexcept { return DummyRecoder(); } + 
RX_ALWAYS_INLINE bool Recode(Serializer &, WrSerializer &) const noexcept { return false; } + RX_ALWAYS_INLINE bool Recode(Serializer &, Payload &, [[maybe_unused]] int tagName, WrSerializer &) const noexcept { return false; } + RX_ALWAYS_INLINE TagType RegisterTagType(TagType oldTagType, [[maybe_unused]] int field) const noexcept { return oldTagType; } + RX_ALWAYS_INLINE TagType RegisterTagType(TagType oldTagType, const TagsPath &) const noexcept { return oldTagType; } + }; + class DefaultRecoder { + public: + DefaultRecoder(Recoder &r) noexcept : r_(&r), needToRecode_(false) {} + + RX_ALWAYS_INLINE DefaultRecoder MakeCleanCopy() const noexcept { return DefaultRecoder(*r_); } + + RX_ALWAYS_INLINE bool Recode(Serializer &ser, WrSerializer &wser) const { + if (needToRecode_) { + r_->Recode(ser, wser); + } + return needToRecode_; + } + RX_ALWAYS_INLINE bool Recode(Serializer &s, Payload &p, int tagName, WrSerializer &wser) const { + if (needToRecode_) { + r_->Recode(s, p, tagName, wser); + } + return needToRecode_; + } + RX_ALWAYS_INLINE TagType RegisterTagType(TagType oldTagType, int field) { + needToRecode_ = r_->Match(field); + return needToRecode_ ? r_->Type(oldTagType) : oldTagType; + } + RX_ALWAYS_INLINE TagType RegisterTagType(TagType oldTagType, const TagsPath &tagsPath) { + needToRecode_ = r_->Match(tagsPath); + return needToRecode_ ? r_->Type(oldTagType) : oldTagType; + } + + private: + Recoder *r_; + bool needToRecode_; + }; + struct NamedTagOpt {}; + struct NamelessTagOpt {}; + + template + void Decode(Payload &pl, Serializer &rdSer, WrSerializer &wrSer, FilterT filter = FilterT(), RecoderT recoder = RecoderT()) { + static_assert(std::is_same_v || std::is_same_v, + "Other filter types are not allowed for the public API"); + static_assert(std::is_same_v || std::is_same_v, + "Other recoder types are not allowed for the public API"); objectScalarIndexes_.reset(); - decodeCJson(pl, rdSer, wrSer, true); + if rx_likely (!filter.HasArraysFields(pl.Type())) { + decodeCJson(pl, rdSer, wrSer, filter, recoder, NamelessTagOpt{}); + return; + } +#ifdef RX_WITH_STDLIB_DEBUG + std::abort(); +#else + // Search of the indexed fields inside the object arrays is not implemented + // Possible implementation has noticeable negative effect on 'FromCJSONPKOnly' benchmark. 
+ // Currently we are using filter for PKs only, and PKs can not be arrays, so this code actually will never be called at the + // current moment + decodeCJson(pl, rdSer, wrSer, DummyFilter(), recoder, NamelessTagOpt{}); +#endif // RX_WITH_STDLIB_DEBUG } private: - bool decodeCJson(Payload &pl, Serializer &rdser, WrSerializer &wrser, bool match); + template + bool decodeCJson(Payload &pl, Serializer &rdser, WrSerializer &wrser, FilterT filter, RecoderT recoder, TagOptT); bool isInArray() const noexcept { return arrayLevel_ > 0; } + [[noreturn]] void throwTagReferenceError(ctag, const Payload &); + [[noreturn]] void throwUnexpectedArrayError(const PayloadFieldType &); TagsMatcher &tagsMatcher_; - const FieldsSet *filter_; TagsPath tagsPath_; - Recoder *recoder_{nullptr}; int32_t arrayLevel_ = 0; ScalarIndexesSetT objectScalarIndexes_; }; +extern template bool CJsonDecoder::decodeCJson( + Payload &, Serializer &, WrSerializer &, CJsonDecoder::DummyFilter, CJsonDecoder::DummyRecoder, CJsonDecoder::NamelessTagOpt); +extern template bool CJsonDecoder::decodeCJson( + Payload &, Serializer &, WrSerializer &, CJsonDecoder::DummyFilter, CJsonDecoder::DefaultRecoder, CJsonDecoder::NamelessTagOpt); +extern template bool CJsonDecoder::decodeCJson( + Payload &, Serializer &, WrSerializer &, CJsonDecoder::RestrictingFilter, CJsonDecoder::DummyRecoder, CJsonDecoder::NamelessTagOpt); +extern template bool CJsonDecoder::decodeCJson( + Payload &, Serializer &, WrSerializer &, CJsonDecoder::RestrictingFilter, CJsonDecoder::DefaultRecoder, CJsonDecoder::NamelessTagOpt); + } // namespace reindexer diff --git a/cpp_src/core/cjson/cjsontools.cc b/cpp_src/core/cjson/cjsontools.cc index 0294772ff..f7d56db0b 100644 --- a/cpp_src/core/cjson/cjsontools.cc +++ b/cpp_src/core/cjson/cjsontools.cc @@ -93,15 +93,21 @@ void copyCJsonValue(TagType tagType, Serializer &rdser, WrSerializer &wrser) { } void skipCjsonTag(ctag tag, Serializer &rdser, std::array *fieldsArrayOffsets) { - const auto field = tag.Field(); - const bool embeddedField = (field < 0); switch (tag.Type()) { case TAG_ARRAY: { + const auto field = tag.Field(); + const bool embeddedField = (field < 0); if (embeddedField) { const carraytag atag = rdser.GetCArrayTag(); - for (size_t i = 0, count = atag.Count(); i < count; ++i) { - const ctag t = atag.Type() != TAG_OBJECT ? 
ctag{atag.Type()} : rdser.GetCTag(); - skipCjsonTag(t, rdser); + const auto count = atag.Count(); + if (atag.Type() == TAG_OBJECT) { + for (size_t i = 0; i < count; ++i) { + skipCjsonTag(rdser.GetCTag(), rdser); + } + } else { + for (size_t i = 0; i < count; ++i) { + skipCjsonTag(ctag{atag.Type()}, rdser); + } } } else { const auto len = rdser.GetVarUint(); @@ -110,7 +116,6 @@ void skipCjsonTag(ctag tag, Serializer &rdser, std::array } } } break; - case TAG_OBJECT: for (ctag otag{rdser.GetCTag()}; otag != kCTagEnd; otag = rdser.GetCTag()) { skipCjsonTag(otag, rdser, fieldsArrayOffsets); @@ -122,12 +127,15 @@ void skipCjsonTag(ctag tag, Serializer &rdser, std::array case TAG_END: case TAG_BOOL: case TAG_NULL: - case TAG_UUID: + case TAG_UUID: { + const auto field = tag.Field(); + const bool embeddedField = (field < 0); if (embeddedField) { rdser.SkipRawVariant(KeyValueType{tag.Type()}); } else if (fieldsArrayOffsets) { (*fieldsArrayOffsets)[field] += 1; } + } } } @@ -160,4 +168,13 @@ void buildPayloadTuple(const PayloadIface &pl, const TagsMatcher *tagsMatcher template void buildPayloadTuple(const PayloadIface &, const TagsMatcher *, WrSerializer &); template void buildPayloadTuple(const PayloadIface &, const TagsMatcher *, WrSerializer &); +void throwUnexpectedNestedArrayError(std::string_view parserName, const PayloadFieldType &f) { + throw Error(errLogic, "Error parsing %s field '%s' - got value nested into the array, but expected scalar %s", parserName, f.Name(), + f.Type().Name()); +} + +void throwScalarMultipleEncodesError(const Payload &pl, const PayloadFieldType &f, int field) { + throw Error(errLogic, "Non-array field '%s' [%d] from '%s' can only be encoded once.", f.Name(), field, pl.Type().Name()); +} + } // namespace reindexer diff --git a/cpp_src/core/cjson/cjsontools.h b/cpp_src/core/cjson/cjsontools.h index 69070c1e8..553280d4e 100644 --- a/cpp_src/core/cjson/cjsontools.h +++ b/cpp_src/core/cjson/cjsontools.h @@ -17,15 +17,16 @@ void putCJsonValue(TagType tagType, int tagName, const VariantArray &values, WrS void skipCjsonTag(ctag tag, Serializer &rdser, std::array *fieldsArrayOffsets = nullptr); [[nodiscard]] Variant cjsonValueToVariant(TagType tag, Serializer &rdser, KeyValueType dstType); +[[noreturn]] void throwUnexpectedNestedArrayError(std::string_view parserName, const PayloadFieldType &f); +[[noreturn]] void throwScalarMultipleEncodesError(const Payload &pl, const PayloadFieldType &f, int field); RX_ALWAYS_INLINE void validateNonArrayFieldRestrictions(const ScalarIndexesSetT &scalarIndexes, const Payload &pl, const PayloadFieldType &f, int field, bool isInArray, std::string_view parserName) { if (!f.IsArray()) { if rx_unlikely (isInArray) { - throw Error(errLogic, "Error parsing %s field '%s' - got value nested into the array, but expected scalar %s", parserName, - f.Name(), f.Type().Name()); + throwUnexpectedNestedArrayError(parserName, f); } if rx_unlikely (scalarIndexes.test(field)) { - throw Error(errLogic, "Non-array field '%s' [%d] from '%s' can only be encoded once.", f.Name(), field, pl.Type().Name()); + throwScalarMultipleEncodesError(pl, f, field); } } } diff --git a/cpp_src/core/cjson/ctag.h b/cpp_src/core/cjson/ctag.h index f752ce006..b591ad616 100644 --- a/cpp_src/core/cjson/ctag.h +++ b/cpp_src/core/cjson/ctag.h @@ -14,7 +14,6 @@ namespace reindexer { class Serializer; class WrSerializer; -} // namespace reindexer class ctag { friend class reindexer::Serializer; @@ -52,8 +51,8 @@ class ctag { [[nodiscard]] constexpr bool operator!=(ctag other) const 
noexcept { return !operator==(other); } private: - explicit constexpr ctag(uint32_t tag) noexcept : ctag{typeImpl(tag), nameImpl(tag), fieldImpl(tag)} { assertrx(tag == tag_); } - explicit constexpr ctag(uint64_t tag) noexcept : ctag{typeImpl(tag), nameImpl(tag), fieldImpl(tag)} { assertrx(tag == tag_); } + explicit constexpr ctag(uint32_t tag) noexcept : ctag{typeImpl(tag), nameImpl(tag), fieldImpl(tag)} { assertrx_dbg(tag == tag_); } + explicit constexpr ctag(uint64_t tag) noexcept : ctag{typeImpl(tag), nameImpl(tag), fieldImpl(tag)} { assertrx_dbg(tag == tag_); } [[nodiscard]] constexpr static TagType typeImpl(uint32_t tag) noexcept { return static_cast((tag & kTypeMask) | ((tag >> kType1Offset) & kInvertedTypeMask)); } @@ -90,7 +89,7 @@ class carraytag { [[nodiscard]] constexpr bool operator!=(carraytag other) const noexcept { return !operator==(other); } private: - explicit constexpr carraytag(uint32_t atag) noexcept : carraytag{countImpl(atag), typeImpl(atag)} { assertrx(atag == atag_); } + explicit constexpr carraytag(uint32_t atag) noexcept : carraytag{countImpl(atag), typeImpl(atag)} { assertrx_dbg(atag == atag_); } [[nodiscard]] constexpr uint32_t asNumber() const noexcept { return atag_; } [[nodiscard]] static constexpr TagType typeImpl(uint32_t atag) noexcept { return static_cast((atag >> kCountBits) & kTypeMask); @@ -99,3 +98,5 @@ class carraytag { uint32_t atag_; }; + +} // namespace reindexer diff --git a/cpp_src/core/cjson/fieldextractor.h b/cpp_src/core/cjson/fieldextractor.h index 6b11f179a..43ac72d64 100644 --- a/cpp_src/core/cjson/fieldextractor.h +++ b/cpp_src/core/cjson/fieldextractor.h @@ -16,7 +16,7 @@ class FieldsExtractor { }; FieldsExtractor() = default; - FieldsExtractor(VariantArray *va, KeyValueType expectedType, int expectedPathDepth, FieldsSet *filter = nullptr, + FieldsExtractor(VariantArray *va, KeyValueType expectedType, int expectedPathDepth, const FieldsSet *filter, FieldParams *params = nullptr) noexcept : values_(va), expectedType_(expectedType), expectedPathDepth_(expectedPathDepth), filter_(filter), params_(params) {} FieldsExtractor(FieldsExtractor &&other) = default; @@ -176,7 +176,7 @@ class FieldsExtractor { VariantArray *values_ = nullptr; KeyValueType expectedType_{KeyValueType::Undefined{}}; int expectedPathDepth_ = 0; - FieldsSet *filter_; + const FieldsSet *filter_; FieldParams *params_; }; diff --git a/cpp_src/core/cjson/tagsmatcher.h b/cpp_src/core/cjson/tagsmatcher.h index 8bb8786b0..f73d3cbe1 100644 --- a/cpp_src/core/cjson/tagsmatcher.h +++ b/cpp_src/core/cjson/tagsmatcher.h @@ -21,7 +21,7 @@ class TagsMatcher { int res = impl_->name2tag(name); return res ? 
res : impl_.clone()->name2tag(name, canAdd, updated_); } - int tags2field(const int16_t* path, size_t pathLen) const { return impl_->tags2field(path, pathLen); } + int tags2field(const int16_t* path, size_t pathLen) const noexcept { return impl_->tags2field(path, pathLen); } const std::string& tag2name(int tag) const { return impl_->tag2name(tag); } TagsPath path2tag(std::string_view jsonPath) const { return impl_->path2tag(jsonPath); } TagsPath path2tag(std::string_view jsonPath, bool canAdd) { diff --git a/cpp_src/core/cjson/tagsmatcherimpl.h b/cpp_src/core/cjson/tagsmatcherimpl.h index e96109868..5b508d4e1 100644 --- a/cpp_src/core/cjson/tagsmatcherimpl.h +++ b/cpp_src/core/cjson/tagsmatcherimpl.h @@ -4,7 +4,6 @@ #include #include -#include "core/keyvalue/key_string.h" #include "core/payload/payloadtype.h" #include "core/payload/payloadtypeimpl.h" #include "ctag.h" @@ -75,25 +74,29 @@ class TagsMatcherImpl { if (content == "*"sv) { node.MarkAllItems(true); } else { - int index = stoi(content); - if (index == 0 && content != "0"sv && ev) { - VariantArray values = ev(content); - if (values.size() != 1) { - throw Error(errParams, "Index expression_ has wrong syntax: '%s'", content); + auto index = try_stoi(content); + if (!index) { + if (ev) { + VariantArray values = ev(content); + if (values.size() != 1) { + throw Error(errParams, "Index expression_ has wrong syntax: '%s'", content); + } + values.front().Type().EvaluateOneOf( + [](OneOf) noexcept {}, + [&](OneOf) { + throw Error(errParams, "Wrong type of index: '%s'", content); + }); + node.SetExpression(content); + index = values.front().As(); + } else { + throw Error(errParams, "Can't convert '%s' to number", content); } - values.front().Type().EvaluateOneOf( - [](OneOf) noexcept {}, - [&](OneOf) { - throw Error(errParams, "Wrong type of index: '%s'", content); - }); - node.SetExpression(content); - index = values.front().As(); } if (index < 0) { throw Error(errLogic, "Array index value cannot be negative"); } - node.SetIndex(index); + node.SetIndex(*index); } field = field.substr(0, openBracketPos); } @@ -140,7 +143,7 @@ class TagsMatcherImpl { return tags2names_[tag - 1]; } - int tags2field(const int16_t *path, size_t pathLen) const { + int tags2field(const int16_t *path, size_t pathLen) const noexcept { if (!pathLen) return -1; return pathCache_.lookup(path, pathLen); } diff --git a/cpp_src/core/cjson/tagspathcache.h b/cpp_src/core/cjson/tagspathcache.h index fecd8d3de..26e0240f9 100644 --- a/cpp_src/core/cjson/tagspathcache.h +++ b/cpp_src/core/cjson/tagspathcache.h @@ -31,7 +31,7 @@ class TagsPathCache { len--; } } - int lookup(const int16_t *tagsPath, size_t len) const { + int lookup(const int16_t *tagsPath, size_t len) const noexcept { assertrx(len); auto cache = this; for (;;) { @@ -51,7 +51,7 @@ class TagsPathCache { } } - void walk(int16_t *path, int depth, const std::function& visitor) const { + void walk(int16_t *path, int depth, const std::function &visitor) const { int16_t &i = path[depth]; for (i = 0; i < int(entries_.size()); i++) { if (entries_[i].field_ > 0) visitor(depth + 1, entries_[i].field_); diff --git a/cpp_src/core/comparator.cc b/cpp_src/core/comparator.cc index 08d001339..c105f53f2 100644 --- a/cpp_src/core/comparator.cc +++ b/cpp_src/core/comparator.cc @@ -82,16 +82,11 @@ void Comparator::Bind(const PayloadType &type, int field) { } } -void Comparator::BindEqualPosition(int field, const VariantArray &val, CondType cond) { cmpEqualPosition.BindField(field, val, cond); } - -void 
Comparator::BindEqualPosition(const TagsPath &tagsPath, const VariantArray &val, CondType cond) { - cmpEqualPosition.BindField(tagsPath, val, cond); -} - bool Comparator::isNumericComparison(const VariantArray &values) const { if (valuesType_.Is() || values.empty()) return false; const KeyValueType keyType{values.front().Type()}; - return !valuesType_.IsSame(keyType) && (valuesType_.Is() || keyType.Is()); + return !valuesType_.IsSame(keyType) && ((valuesType_.Is() && !keyType.Is()) || + (keyType.Is() && !valuesType_.Is())); } bool Comparator::Compare(const PayloadValue &data, int rowId) { @@ -191,7 +186,7 @@ void Comparator::ExcludeDistinct(const PayloadValue &data, int rowId) { } else { // Exclude field from payload by offset (fast path) - assertrx(!type_.Is()); + assertrx_throw(!type_.Is()); // Check if we have column (rawData_), then go to fastest path with column if (rawData_) return excludeDistinct(rawData_ + rowId * sizeof_); diff --git a/cpp_src/core/comparator.h b/cpp_src/core/comparator.h index 3760077c8..7b1a05445 100644 --- a/cpp_src/core/comparator.h +++ b/cpp_src/core/comparator.h @@ -15,8 +15,10 @@ class Comparator : public ComparatorVars { bool Compare(const PayloadValue &lhs, int rowId); void ExcludeDistinct(const PayloadValue &, int rowId); void Bind(const PayloadType &type, int field); - void BindEqualPosition(int field, const VariantArray &val, CondType cond); - void BindEqualPosition(const TagsPath &tagsPath, const VariantArray &val, CondType cond); + template + void BindEqualPosition(F &&field, const VariantArray &val, CondType cond) { + cmpEqualPosition.BindField(std::forward(field), val, cond); + } void ClearDistinct() { cmpInt.ClearDistinct(); cmpBool.ClearDistinct(); @@ -116,7 +118,7 @@ class Comparator : public ComparatorVars { ComparatorImpl cmpComposite; ComparatorImpl cmpGeom; ComparatorImpl cmpUuid; - CompositeArrayComparator cmpEqualPosition; + EqualPositionComparator cmpEqualPosition; KeyValueType valuesType_{KeyValueType::Undefined{}}; }; diff --git a/cpp_src/core/comparatorimpl.h b/cpp_src/core/comparatorimpl.h index 41f0aa486..5da4466bc 100644 --- a/cpp_src/core/comparatorimpl.h +++ b/cpp_src/core/comparatorimpl.h @@ -349,10 +349,12 @@ class ComparatorImpl { void SetValues(CondType cond, const VariantArray &values, const ComparatorVars &vars) { if (cond == CondSet) { valuesSet_.reset(new intrusive_atomic_rc_wrapper( - values.size(), hash_composite(vars.payloadType_, vars.fields_), equal_composite(vars.payloadType_, vars.fields_))); + values.size(), hash_composite(PayloadType{vars.payloadType_}, FieldsSet{vars.fields_}), + equal_composite(PayloadType{vars.payloadType_}, FieldsSet{vars.fields_}))); } else if (cond == CondAllSet) { valuesSet_.reset(new intrusive_atomic_rc_wrapper( - values.size(), hash_composite(vars.payloadType_, vars.fields_), equal_composite(vars.payloadType_, vars.fields_))); + values.size(), hash_composite(PayloadType{vars.payloadType_}, FieldsSet{vars.fields_}), + equal_composite(PayloadType{vars.payloadType_}, FieldsSet{vars.fields_}))); allSetValuesSet_.reset(new intrusive_atomic_rc_wrapper>{}); } diff --git a/cpp_src/core/compositearraycomparator.cc b/cpp_src/core/compositearraycomparator.cc index 6e58fdf98..4ffb48e83 100644 --- a/cpp_src/core/compositearraycomparator.cc +++ b/cpp_src/core/compositearraycomparator.cc @@ -2,37 +2,29 @@ #include namespace reindexer { -CompositeArrayComparator::CompositeArrayComparator() {} +void EqualPositionComparator::BindField(int field, const VariantArray &values, CondType cond) { bindField(field, 
values, cond); } -void CompositeArrayComparator::BindField(int field, const VariantArray &values, CondType condType) { +void EqualPositionComparator::BindField(const FieldsPath &fieldPath, const VariantArray &values, CondType cond) { + bindField(fieldPath, values, cond); +} + +template +void EqualPositionComparator::bindField(F field, const VariantArray &values, CondType cond) { fields_.push_back(field); Context &ctx = ctx_.emplace_back(); - ctx.cond = condType; - ctx.cmpBool.SetValues(condType, values); - ctx.cmpInt.SetValues(condType, values); - ctx.cmpInt64.SetValues(condType, values); - ctx.cmpString.SetValues(condType, values, CollateOpts()); - ctx.cmpDouble.SetValues(condType, values); - ctx.cmpUuid.SetValues(condType, values); + ctx.cond = cond; + ctx.cmpBool.SetValues(cond, values); + ctx.cmpInt.SetValues(cond, values); + ctx.cmpInt64.SetValues(cond, values); + ctx.cmpString.SetValues(cond, values, CollateOpts()); + ctx.cmpDouble.SetValues(cond, values); + ctx.cmpUuid.SetValues(cond, values); assertrx(ctx_.size() == fields_.size()); } -void CompositeArrayComparator::BindField(const TagsPath &tagsPath, const VariantArray &values, CondType condType) { - fields_.push_back(tagsPath); - Context &ctx = ctx_.emplace_back(); - - ctx.cond = condType; - ctx.cmpBool.SetValues(condType, values); - ctx.cmpInt.SetValues(condType, values); - ctx.cmpInt64.SetValues(condType, values); - ctx.cmpString.SetValues(condType, values, CollateOpts()); - ctx.cmpDouble.SetValues(condType, values); - ctx.cmpUuid.SetValues(condType, values); -} - -bool CompositeArrayComparator::Compare(const PayloadValue &pv, const ComparatorVars &vars) { +bool EqualPositionComparator::Compare(const PayloadValue &pv, const ComparatorVars &vars) { ConstPayload pl(vars.payloadType_, pv); size_t len = INT_MAX; @@ -64,7 +56,7 @@ bool CompositeArrayComparator::Compare(const PayloadValue &pv, const ComparatorV return false; } -bool CompositeArrayComparator::compareField(size_t field, const Variant &v, const ComparatorVars &vars) { +bool EqualPositionComparator::compareField(size_t field, const Variant &v, const ComparatorVars &vars) { return v.Type().EvaluateOneOf( [&](KeyValueType::Bool) { return ctx_[field].cmpBool.Compare(ctx_[field].cond, static_cast(v)); }, [&](KeyValueType::Int) { return ctx_[field].cmpInt.Compare(ctx_[field].cond, static_cast(v)); }, diff --git a/cpp_src/core/compositearraycomparator.h b/cpp_src/core/compositearraycomparator.h index d646ed6b4..a58735dc6 100644 --- a/cpp_src/core/compositearraycomparator.h +++ b/cpp_src/core/compositearraycomparator.h @@ -5,17 +5,19 @@ namespace reindexer { -class CompositeArrayComparator { +class EqualPositionComparator { public: - CompositeArrayComparator(); + EqualPositionComparator() noexcept = default; - void BindField(int field, const VariantArray &values, CondType condType); - void BindField(const TagsPath &tagsPath, const VariantArray &values, CondType condType); - bool Compare(const PayloadValue &pv, const ComparatorVars &vars); + void BindField(int field, const VariantArray &, CondType); + void BindField(const FieldsPath &, const VariantArray &, CondType); + bool Compare(const PayloadValue &, const ComparatorVars &); bool IsBinded() { return !ctx_.empty(); } private: - bool compareField(size_t field, const Variant &v, const ComparatorVars &vars); + bool compareField(size_t field, const Variant &, const ComparatorVars &); + template + void bindField(F field, const VariantArray &, CondType); struct Context { CondType cond; diff --git a/cpp_src/core/dbconfig.cc 
b/cpp_src/core/dbconfig.cc index d2b4aa462..60ebb6550 100644 --- a/cpp_src/core/dbconfig.cc +++ b/cpp_src/core/dbconfig.cc @@ -45,8 +45,7 @@ Error DBConfigProvider::FromJSON(const gason::JsonNode &root) { LongTxLoggingParams{profilingNode["long_queries_logging"]["transaction"]["threshold_us"].As(), profilingNode["long_queries_logging"]["transaction"]["avg_step_threshold_us"].As()}); } - auto it = handlers_.find(ProfilingConf); - if (it != handlers_.end()) (it->second)(); + if (handlers_[ProfilingConf]) (handlers_[ProfilingConf])(); } auto &namespacesNode = root["namespaces"]; @@ -73,10 +72,29 @@ Error DBConfigProvider::FromJSON(const gason::JsonNode &root) { data.maxPreselectPart = nsNode["max_preselect_part"].As(data.maxPreselectPart, 0.0, 1.0); data.idxUpdatesCountingMode = nsNode["index_updates_counting_mode"].As(data.idxUpdatesCountingMode); data.syncStorageFlushLimit = nsNode["sync_storage_flush_limit"].As(data.syncStorageFlushLimit, 0); + + auto cacheConfig = nsNode["cache"]; + if (!cacheConfig.empty()) { + data.cacheConfig.idxIdsetCacheSize = + cacheConfig["index_idset_cache_size"].As(data.cacheConfig.idxIdsetCacheSize, 0); + data.cacheConfig.idxIdsetHitsToCache = + cacheConfig["index_idset_hits_to_cache"].As(data.cacheConfig.idxIdsetHitsToCache, 0); + data.cacheConfig.ftIdxCacheSize = cacheConfig["ft_index_cache_size"].As(data.cacheConfig.ftIdxCacheSize, 0); + data.cacheConfig.ftIdxHitsToCache = + cacheConfig["ft_index_hits_to_cache"].As(data.cacheConfig.ftIdxHitsToCache, 0); + data.cacheConfig.joinCacheSize = + cacheConfig["joins_preselect_cache_size"].As(data.cacheConfig.joinCacheSize, 0); + data.cacheConfig.joinHitsToCache = + cacheConfig["joins_preselect_hit_to_cache"].As(data.cacheConfig.joinHitsToCache, 0); + data.cacheConfig.queryCountCacheSize = + cacheConfig["query_count_cache_size"].As(data.cacheConfig.queryCountCacheSize, 0); + data.cacheConfig.queryCountHitsToCache = + cacheConfig["query_count_hit_to_cache"].As(data.cacheConfig.queryCountHitsToCache, 0); + } + namespacesData_.emplace(nsNode["namespace"].As(), std::move(data)); // NOLINT(performance-move-const-arg) } - auto it = handlers_.find(NamespaceDataConf); - if (it != handlers_.end()) (it->second)(); + if (handlers_[NamespaceDataConf]) (handlers_[NamespaceDataConf])(); } auto &replicationNode = root["replication"]; @@ -84,8 +102,7 @@ Error DBConfigProvider::FromJSON(const gason::JsonNode &root) { auto err = replicationData_.FromJSON(replicationNode); if (!err.ok()) return err; - auto it = handlers_.find(ReplicationConf); - if (it != handlers_.end()) (it->second)(); + if (handlers_[ReplicationConf]) (handlers_[ReplicationConf])(); } return errOK; } catch (const Error &err) { @@ -105,11 +122,11 @@ ReplicationConfigData DBConfigProvider::GetReplicationConfig() { return replicationData_; } -bool DBConfigProvider::GetNamespaceConfig(const std::string &nsName, NamespaceConfigData &data) { +bool DBConfigProvider::GetNamespaceConfig(std::string_view nsName, NamespaceConfigData &data) { shared_lock lk(mtx_); auto it = namespacesData_.find(nsName); if (it == namespacesData_.end()) { - it = namespacesData_.find("*"); + it = namespacesData_.find(std::string_view("*")); } if (it == namespacesData_.end()) { data = {}; diff --git a/cpp_src/core/dbconfig.h b/cpp_src/core/dbconfig.h index cbb4b1bed..fca5bae6d 100644 --- a/cpp_src/core/dbconfig.h +++ b/cpp_src/core/dbconfig.h @@ -2,7 +2,7 @@ #include #include -#include +#include "estl/fast_hash_map.h" #include "estl/fast_hash_set.h" #include "estl/mutex.h" #include 
"estl/shared_mutex.h" @@ -18,7 +18,13 @@ class JsonBuilder; class RdxContext; class WrSerializer; -enum ConfigType { ProfilingConf, NamespaceDataConf, ReplicationConf }; +enum ConfigType { + ProfilingConf = 0, + NamespaceDataConf, + ReplicationConf, + // + kConfigTypesTotalCount +}; class LongQueriesLoggingParams { public: @@ -65,6 +71,31 @@ class ProfilingConfigData { std::atomic longTxLoggingParams; }; +constexpr size_t kDefaultCacheSizeLimit = 1024 * 1024 * 128; +constexpr uint32_t kDefaultHitCountToCache = 2; + +struct NamespaceCacheConfigData { + bool IsIndexesCacheEqual(const NamespaceCacheConfigData &o) noexcept { + return idxIdsetCacheSize == o.idxIdsetCacheSize && idxIdsetHitsToCache == o.idxIdsetHitsToCache && + ftIdxCacheSize == o.ftIdxCacheSize && ftIdxHitsToCache == o.ftIdxHitsToCache; + } + bool IsJoinCacheEqual(const NamespaceCacheConfigData &o) noexcept { + return joinCacheSize == o.joinCacheSize && joinHitsToCache == o.joinHitsToCache; + } + bool IsQueryCountCacheEqual(const NamespaceCacheConfigData &o) noexcept { + return queryCountCacheSize == o.queryCountCacheSize && queryCountHitsToCache == o.queryCountHitsToCache; + } + + uint64_t idxIdsetCacheSize = kDefaultCacheSizeLimit; + uint32_t idxIdsetHitsToCache = kDefaultHitCountToCache; + uint64_t ftIdxCacheSize = kDefaultCacheSizeLimit; + uint32_t ftIdxHitsToCache = kDefaultHitCountToCache; + uint64_t joinCacheSize = 2 * kDefaultCacheSizeLimit; + uint32_t joinHitsToCache = kDefaultHitCountToCache; + uint64_t queryCountCacheSize = kDefaultCacheSizeLimit; + uint32_t queryCountHitsToCache = kDefaultHitCountToCache; +}; + struct NamespaceConfigData { bool lazyLoad = false; int noQueryIdleThreshold = 0; @@ -82,6 +113,7 @@ struct NamespaceConfigData { double maxPreselectPart = 0.1; bool idxUpdatesCountingMode = false; int syncStorageFlushLimit = 20000; + NamespaceCacheConfigData cacheConfig; }; enum ReplicationRole { ReplicationNone, ReplicationMaster, ReplicationSlave, ReplicationReadOnly }; @@ -133,7 +165,7 @@ class DBConfigProvider { void setHandler(ConfigType cfgType, std::function handler); ReplicationConfigData GetReplicationConfig(); - bool GetNamespaceConfig(const std::string &nsName, NamespaceConfigData &data); + bool GetNamespaceConfig(std::string_view nsName, NamespaceConfigData &data); LongQueriesLoggingParams GetSelectLoggingParams() const noexcept { return profilingData_.longSelectLoggingParams.load(std::memory_order_relaxed); } @@ -150,8 +182,8 @@ class DBConfigProvider { private: ProfilingConfigData profilingData_; ReplicationConfigData replicationData_; - std::unordered_map namespacesData_; - std::unordered_map> handlers_; + fast_hash_map namespacesData_; + std::array, kConfigTypesTotalCount> handlers_; shared_timed_mutex mtx_; }; diff --git a/cpp_src/core/defnsconfigs.h b/cpp_src/core/defnsconfigs.h index 36bcd02b4..04fec8aa3 100644 --- a/cpp_src/core/defnsconfigs.h +++ b/cpp_src/core/defnsconfigs.h @@ -56,7 +56,17 @@ const std::vector kDefDBConfig = { "max_preselect_size":1000, "max_preselect_part":0.1, "index_updates_counting_mode":false, - "sync_storage_flush_limit":20000 + "sync_storage_flush_limit":20000, + "cache":{ + "index_idset_cache_size":134217728, + "index_idset_hits_to_cache":2, + "ft_index_cache_size":134217728, + "ft_index_hits_to_cache":2, + "joins_preselect_cache_size":268435456, + "joins_preselect_hit_to_cache":2, + "query_count_cache_size":134217728, + "query_count_hit_to_cache":2 + } } ] })json", @@ -114,7 +124,6 @@ const std::vector kSystemNsDefs = { .AddIndex("last_sec_avg_lock_time_us", 
"-", "int64", IndexOpts().Dense()) .AddIndex("latency_stddev", "-", "double", IndexOpts().Dense()), NamespaceDef(kNamespacesNamespace, StorageOpts()).AddIndex(kNsNameField, "hash", "string", IndexOpts().PK()), - NamespaceDef(kPerfStatsNamespace, StorageOpts()).AddIndex(kNsNameField, "hash", "string", IndexOpts().PK()), NamespaceDef(kMemStatsNamespace, StorageOpts()) .AddIndex(kNsNameField, "hash", "string", IndexOpts().PK()) .AddIndex("items_count", "-", "int64", IndexOpts().Dense()) diff --git a/cpp_src/core/expressiontree.h b/cpp_src/core/expressiontree.h index 6f4147452..0eeaced61 100644 --- a/cpp_src/core/expressiontree.h +++ b/cpp_src/core/expressiontree.h @@ -283,9 +283,10 @@ class ExpressionTree { }; public: - Node() : storage_{SubTree{1}} {} + Node() : storage_{std::in_place_type, 1} {} template - Node(OperationType op, size_t s, Args&&... args) : storage_{SubTree{s, std::forward(args)...}}, operation{op} {} + Node(OperationType op, size_t s, Args&&... args) + : storage_{std::in_place_type, s, std::forward(args)...}, operation{op} {} template Node(OperationType op, T&& v) : storage_{std::forward(v)}, operation{op} {} Node(const Node& other) : storage_{other.storage_}, operation{other.operation} {} @@ -363,6 +364,10 @@ class ExpressionTree { void SetValue(T&& v) { storage_ = std::forward(v); } + template + void Emplace(Args&&... args) { + storage_.template emplace(std::forward(args)...); + } private: Storage storage_; @@ -443,6 +448,15 @@ class ExpressionTree { } container_.emplace_back(op, v); } + /// Appends value to the last openned subtree + template + void Append(OperationType op, Args&&... args) { + for (unsigned i : activeBrackets_) { + assertrx(i < container_.size()); + container_[i].Append(); + } + container_.emplace_back(op, T{std::forward(args)...}); + } class const_iterator; /// Appends all nodes from the interval to the last openned subtree void Append(const_iterator begin, const_iterator end) { diff --git a/cpp_src/core/ft/ftsetcashe.h b/cpp_src/core/ft/ftsetcashe.h index 810a06ccd..95e64f014 100644 --- a/cpp_src/core/ft/ftsetcashe.h +++ b/cpp_src/core/ft/ftsetcashe.h @@ -17,6 +17,6 @@ struct FtIdSetCacheVal { FtCtx::Data::Ptr ctx; }; -class FtIdSetCache : public LRUCache {}; +using FtIdSetCache = LRUCache; } // namespace reindexer diff --git a/cpp_src/core/iclientsstats.cc b/cpp_src/core/iclientsstats.cc index c244d5b33..2e52eb5d1 100644 --- a/cpp_src/core/iclientsstats.cc +++ b/cpp_src/core/iclientsstats.cc @@ -8,6 +8,7 @@ void ClientStat::GetJSON(WrSerializer& ser) const { JsonBuilder builder(ser); builder.Put("connection_id", connectionId); builder.Put("ip", ip); + builder.Put("protocol", protocol); builder.Put("user_name", userName); builder.Put("db_name", dbName); builder.Put("current_activity", currentActivity); diff --git a/cpp_src/core/iclientsstats.h b/cpp_src/core/iclientsstats.h index a19f1989f..5c319034a 100644 --- a/cpp_src/core/iclientsstats.h +++ b/cpp_src/core/iclientsstats.h @@ -7,11 +7,15 @@ namespace reindexer { +constexpr std::string_view kTcpProtocolName = "tcp"; +constexpr std::string_view kUnixProtocolName = "unix"; + class WrSerializer; struct ClientStat { void GetJSON(WrSerializer& ser) const; int connectionId = 0; + std::string_view protocol = kTcpProtocolName; std::string ip; std::string userName; std::string dbName; @@ -43,6 +47,7 @@ struct ClientConnectionStat { std::shared_ptr connectionStat; std::shared_ptr txStats; std::string ip; + std::string_view protocol = kTcpProtocolName; std::string userName; std::string dbName; std::string 
userRights; diff --git a/cpp_src/core/idset.cc b/cpp_src/core/idset.cc index c8576fc94..da7047de7 100644 --- a/cpp_src/core/idset.cc +++ b/cpp_src/core/idset.cc @@ -1,18 +1,8 @@ #include "core/idset.h" -#include #include "tools/errors.h" namespace reindexer { -void IdSet::Commit() { - if (!size() && set_) { - resize(0); - for (auto id : *set_) push_back(id); - } - - usingBtree_.store(false, std::memory_order_release); -} - std::string IdSetPlain::Dump() const { std::string buf = "["; diff --git a/cpp_src/core/idset.h b/cpp_src/core/idset.h index 6c193f9c4..81522fcef 100644 --- a/cpp_src/core/idset.h +++ b/cpp_src/core/idset.h @@ -74,7 +74,7 @@ class IdSetPlain : protected base_idset { std::string Dump() const; protected: - IdSetPlain(base_idset &&idset) : base_idset(std::move(idset)) {} + IdSetPlain(base_idset &&idset) noexcept : base_idset(std::move(idset)) {} }; std::ostream &operator<<(std::ostream &, const IdSetPlain &); @@ -87,7 +87,7 @@ class IdSet : public IdSetPlain { public: using Ptr = intrusive_ptr>; - IdSet() : usingBtree_(false) {} + IdSet() noexcept : usingBtree_(false) {} IdSet(const IdSet &other) : IdSetPlain(other), set_(!other.set_ ? nullptr : new base_idsetset(*other.set_)), usingBtree_(other.usingBtree_.load()) {} IdSet(IdSet &&other) noexcept : IdSetPlain(std::move(other)), set_(std::move(other.set_)), usingBtree_(other.usingBtree_.load()) {} @@ -194,18 +194,24 @@ class IdSet : public IdSetPlain { base_idset::erase(d.first, d.second); return d.second - d.first; } else { - resize(0); + clear(); usingBtree_.store(true, std::memory_order_release); return set_->erase(id); } - return 0; } - void Commit(); - bool IsCommited() const { return !usingBtree_.load(std::memory_order_acquire); } - bool IsEmpty() const { return empty() && (!set_ || set_->empty()); } - size_t Size() const { return usingBtree_.load(std::memory_order_acquire) ? set_->size() : size(); } - size_t BTreeSize() const { return set_ ? sizeof(*set_.get()) + set_->size() * sizeof(int) : 0; } - const base_idsetset *BTree() const { return set_.get(); } + void Commit() { + if (!size() && set_) { + reserve(set_->size()); + for (auto id : *set_) push_back(id); + } + + usingBtree_.store(false, std::memory_order_release); + } + bool IsCommited() const noexcept { return !usingBtree_.load(std::memory_order_acquire); } + bool IsEmpty() const noexcept { return empty() && (!set_ || set_->empty()); } + size_t Size() const noexcept { return usingBtree_.load(std::memory_order_acquire) ? set_->size() : size(); } + size_t BTreeSize() const noexcept { return set_ ? sizeof(*set_.get()) + set_->size() * sizeof(int) : 0; } + const base_idsetset *BTree() const noexcept { return set_.get(); } void ReserveForSorted(int sortedIdxCount) { reserve(((set_ ? 
set_->size() : size())) * (sortedIdxCount + 1)); } protected: @@ -214,7 +220,7 @@ class IdSet : public IdSetPlain { template friend class BtreeIndexReverseIteratorImpl; - IdSet(base_idset &&idset) : IdSetPlain(std::move(idset)), usingBtree_(false) {} + IdSet(base_idset &&idset) noexcept : IdSetPlain(std::move(idset)), usingBtree_(false) {} std::unique_ptr set_; std::atomic usingBtree_; diff --git a/cpp_src/core/idsetcache.h b/cpp_src/core/idsetcache.h index 3c2fd9184..185073c22 100644 --- a/cpp_src/core/idsetcache.h +++ b/cpp_src/core/idsetcache.h @@ -83,8 +83,11 @@ struct hash_idset_cache_key { size_t operator()(const IdSetCacheKey &s) const { return (s.cond << 8) ^ (s.sort << 16) ^ s.keys->Hash(); } }; -class IdSetCache : public LRUCache { +using IdSetCacheBase = LRUCache; + +class IdSetCache : public IdSetCacheBase { public: + IdSetCache(size_t sizeLimit, uint32_t hitCount) : IdSetCacheBase(sizeLimit, hitCount) {} void ClearSorted(const std::bitset &s) { if (s.any()) { Clear([&s](const IdSetCacheKey &k) { return s.test(k.sort); }); diff --git a/cpp_src/core/index/index.cc b/cpp_src/core/index/index.cc index 552033fc2..cd3ba6d3c 100644 --- a/cpp_src/core/index/index.cc +++ b/cpp_src/core/index/index.cc @@ -1,5 +1,4 @@ #include "index.h" -#include "core/namespacedef.h" #include "indexordered.h" #include "indextext/fastindextext.h" #include "indextext/fuzzyindextext.h" @@ -10,8 +9,8 @@ namespace reindexer { -Index::Index(const IndexDef& idef, PayloadType payloadType, const FieldsSet& fields) - : type_(idef.Type()), name_(idef.name_), opts_(idef.opts_), payloadType_(std::move(payloadType)), fields_(fields) { +Index::Index(const IndexDef& idef, PayloadType&& payloadType, FieldsSet&& fields) + : type_(idef.Type()), name_(idef.name_), opts_(idef.opts_), payloadType_(std::move(payloadType)), fields_(std::move(fields)) { logPrintf(LogTrace, "Index::Index ('%s',%s,%s) %s%s%s", idef.name_, idef.indexType_, idef.fieldType_, idef.opts_.IsPK() ? ",pk" : "", idef.opts_.IsDense() ? ",dense" : "", idef.opts_.IsArray() ? 
",array" : ""); } @@ -28,38 +27,39 @@ Index::Index(const Index& obj) selectKeyType_(obj.selectKeyType_), sortedIdxCount_(obj.sortedIdxCount_) {} -std::unique_ptr Index::New(const IndexDef& idef, PayloadType payloadType, const FieldsSet& fields) { +std::unique_ptr Index::New(const IndexDef& idef, PayloadType&& payloadType, FieldsSet&& fields, + const NamespaceCacheConfigData& cacheCfg) { switch (idef.Type()) { case IndexStrBTree: case IndexIntBTree: case IndexDoubleBTree: case IndexInt64BTree: case IndexCompositeBTree: - return IndexOrdered_New(idef, std::move(payloadType), fields); + return IndexOrdered_New(idef, std::move(payloadType), std::move(fields), cacheCfg); case IndexStrHash: case IndexIntHash: case IndexInt64Hash: case IndexCompositeHash: - return IndexUnordered_New(idef, std::move(payloadType), fields); + return IndexUnordered_New(idef, std::move(payloadType), std::move(fields), cacheCfg); case IndexIntStore: case IndexStrStore: case IndexInt64Store: case IndexDoubleStore: case IndexBool: case IndexUuidStore: - return IndexStore_New(idef, std::move(payloadType), fields); + return IndexStore_New(idef, std::move(payloadType), std::move(fields)); case IndexFastFT: case IndexCompositeFastFT: - return FastIndexText_New(idef, std::move(payloadType), fields); + return FastIndexText_New(idef, std::move(payloadType), std::move(fields), cacheCfg); case IndexFuzzyFT: case IndexCompositeFuzzyFT: - return FuzzyIndexText_New(idef, std::move(payloadType), fields); + return FuzzyIndexText_New(idef, std::move(payloadType), std::move(fields), cacheCfg); case IndexTtl: - return TtlIndex_New(idef, std::move(payloadType), fields); + return TtlIndex_New(idef, std::move(payloadType), std::move(fields), cacheCfg); case ::IndexRTree: - return IndexRTree_New(idef, std::move(payloadType), fields); + return IndexRTree_New(idef, std::move(payloadType), std::move(fields), cacheCfg); case IndexUuidHash: - return IndexUuid_New(idef, std::move(payloadType), fields); + return IndexUuid_New(idef, std::move(payloadType), std::move(fields), cacheCfg); } throw Error(errParams, "Ivalid index type %d for index '%s'", idef.Type(), idef.name_); } diff --git a/cpp_src/core/index/index.h b/cpp_src/core/index/index.h index 136a97fdf..0e4fe3ccf 100644 --- a/cpp_src/core/index/index.h +++ b/cpp_src/core/index/index.h @@ -12,7 +12,6 @@ #include "core/payload/payloadiface.h" #include "core/perfstatcounter.h" #include "core/selectkeyresult.h" -#include "core/type_consts_helpers.h" #include "ft_preselect.h" #include "indexiterator.h" @@ -46,7 +45,7 @@ class Index { using KeyEntry = reindexer::KeyEntry; using KeyEntryPlain = reindexer::KeyEntry; - Index(const IndexDef& idef, PayloadType payloadType, const FieldsSet& fields); + Index(const IndexDef& idef, PayloadType&& payloadType, FieldsSet&& fields); Index(const Index&); Index& operator=(const Index&) = delete; virtual ~Index() = default; @@ -82,20 +81,24 @@ class Index { virtual bool IsDestroyPartSupported() const noexcept { return false; } virtual void AddDestroyTask(tsl::detail_sparse_hash::ThreadTaskQueue&) {} - const PayloadType& GetPayloadType() const { return payloadType_; } - void UpdatePayloadType(PayloadType payloadType) { payloadType_ = std::move(payloadType); } + const PayloadType& GetPayloadType() const& { return payloadType_; } + const PayloadType& GetPayloadType() const&& = delete; + void UpdatePayloadType(PayloadType&& payloadType) { payloadType_ = std::move(payloadType); } - static std::unique_ptr New(const IndexDef& idef, PayloadType payloadType, const 
FieldsSet& fields_); + static std::unique_ptr New(const IndexDef& idef, PayloadType&& payloadType, FieldsSet&& fields_, + const NamespaceCacheConfigData& cacheCfg); KeyValueType KeyType() const { return keyType_; } KeyValueType SelectKeyType() const { return selectKeyType_; } - const FieldsSet& Fields() const { return fields_; } - const std::string& Name() const { return name_; } + const FieldsSet& Fields() const& noexcept { return fields_; } + const FieldsSet& Fields() const&& = delete; + const std::string& Name() const& noexcept { return name_; } + const std::string& Name() const&& = delete; IndexType Type() const { return type_; } const std::vector& SortOrders() const { return sortOrders_; } const IndexOpts& Opts() const { return opts_; } virtual void SetOpts(const IndexOpts& opts) { opts_ = opts; } - virtual void SetFields(FieldsSet&& fields) { fields_ = std::move(fields); } + void SetFields(FieldsSet&& fields) { fields_ = std::move(fields); } [[nodiscard]] SortType SortId() const noexcept { return sortId_; } virtual void SetSortedIdxCount(int sortedIdxCount) { sortedIdxCount_ = sortedIdxCount; } virtual FtMergeStatuses GetFtMergeStatuses(const RdxContext&) { @@ -124,6 +127,7 @@ class Index { virtual bool IsBuilt() const noexcept { return isBuilt_; } virtual void MarkBuilt() noexcept { isBuilt_ = true; } virtual void EnableUpdatesCountingMode(bool) noexcept {} + virtual void ReconfigureCache(const NamespaceCacheConfigData& cacheCfg) = 0; virtual void Dump(std::ostream& os, std::string_view step = " ", std::string_view offset = "") const { dump(os, step, offset); } @@ -140,8 +144,12 @@ class Index { IndexOpts opts_; // Payload type of items mutable PayloadType payloadType_; - // Fields in index. Valid only for composite indexes + +private: + // Fields in index FieldsSet fields_; + +protected: // Perfstat counter PerfStatCounterMT commitPerfCounter_; PerfStatCounterMT selectPerfCounter_; diff --git a/cpp_src/core/index/indexordered.cc b/cpp_src/core/index/indexordered.cc index 828030b1b..643562e35 100644 --- a/cpp_src/core/index/indexordered.cc +++ b/cpp_src/core/index/indexordered.cc @@ -11,7 +11,7 @@ template Variant IndexOrdered::Upsert(const Variant &key, IdType id, bool &clearCache) { if (key.Type().Is()) { if (this->empty_ids_.Unsorted().Add(id, IdSet::Auto, this->sortedIdxCount_)) { - if (this->cache_) this->cache_.reset(); + this->cache_.reset(); clearCache = true; this->isBuilt_ = false; } @@ -28,7 +28,7 @@ Variant IndexOrdered::Upsert(const Variant &key, IdType id, bool &clearCache) if (keyIt->second.Unsorted().Add(id, this->opts_.IsPK() ? 
IdSet::Ordered : IdSet::Auto, this->sortedIdxCount_)) { this->isBuilt_ = false; - if (this->cache_) this->cache_.reset(); + this->cache_.reset(); clearCache = true; } this->tracker_.markUpdated(this->idx_map, keyIt); @@ -56,10 +56,6 @@ SelectKeyResults IndexOrdered::SelectKey(const VariantArray &keys, CondType c return IndexUnordered::SelectKey(keys, condition, sortId, opts, ctx, rdxCtx); } - if (keys.size() < 1) { - throw Error(errParams, "For condition required at least 1 argument, but provided 0"); - } - SelectKeyResult res; auto startIt = this->idx_map.begin(); auto endIt = this->idx_map.end(); @@ -81,7 +77,6 @@ SelectKeyResults IndexOrdered::SelectKey(const VariantArray &keys, CondType c if (startIt == this->idx_map.end()) startIt = this->idx_map.upper_bound(static_cast(key1)); break; case CondRange: { - if (keys.size() != 2) throw Error(errParams, "For ranged query reuqired 2 arguments, but provided %d", keys.size()); const auto &key2 = keys[1]; startIt = this->idx_map.find(static_cast(key1)); @@ -207,18 +202,20 @@ IndexIterator::Ptr IndexOrdered::CreateIterator() const { } template -static std::unique_ptr IndexOrdered_New(const IndexDef &idef, PayloadType payloadType, const FieldsSet &fields) { +static std::unique_ptr IndexOrdered_New(const IndexDef &idef, PayloadType &&payloadType, FieldsSet &&fields, + const NamespaceCacheConfigData &cacheCfg) { switch (idef.Type()) { case IndexIntBTree: - return std::unique_ptr{new IndexOrdered>(idef, std::move(payloadType), fields)}; + return std::make_unique>>(idef, std::move(payloadType), std::move(fields), cacheCfg); case IndexInt64BTree: - return std::unique_ptr{new IndexOrdered>(idef, std::move(payloadType), fields)}; + return std::make_unique>>(idef, std::move(payloadType), std::move(fields), + cacheCfg); case IndexStrBTree: - return std::unique_ptr{new IndexOrdered>(idef, std::move(payloadType), fields)}; + return std::make_unique>>(idef, std::move(payloadType), std::move(fields), cacheCfg); case IndexDoubleBTree: - return std::unique_ptr{new IndexOrdered>(idef, std::move(payloadType), fields)}; + return std::make_unique>>(idef, std::move(payloadType), std::move(fields), cacheCfg); case IndexCompositeBTree: - return std::unique_ptr{new IndexOrdered>(idef, std::move(payloadType), fields)}; + return std::make_unique>>(idef, std::move(payloadType), std::move(fields), cacheCfg); case IndexStrHash: case IndexIntHash: case IndexInt64Hash: @@ -242,9 +239,11 @@ static std::unique_ptr IndexOrdered_New(const IndexDef &idef, PayloadType } // NOLINTBEGIN(*cplusplus.NewDeleteLeaks) -std::unique_ptr IndexOrdered_New(const IndexDef &idef, PayloadType payloadType, const FieldsSet &fields) { - return (idef.opts_.IsPK() || idef.opts_.IsDense()) ? IndexOrdered_New(idef, std::move(payloadType), fields) - : IndexOrdered_New(idef, std::move(payloadType), fields); +std::unique_ptr IndexOrdered_New(const IndexDef &idef, PayloadType &&payloadType, FieldsSet &&fields, + const NamespaceCacheConfigData &cacheCfg) { + return (idef.opts_.IsPK() || idef.opts_.IsDense()) + ? 
IndexOrdered_New(idef, std::move(payloadType), std::move(fields), cacheCfg) + : IndexOrdered_New(idef, std::move(payloadType), std::move(fields), cacheCfg); } // NOLINTEND(*cplusplus.NewDeleteLeaks) diff --git a/cpp_src/core/index/indexordered.h b/cpp_src/core/index/indexordered.h index e8dc6d229..a12928024 100644 --- a/cpp_src/core/index/indexordered.h +++ b/cpp_src/core/index/indexordered.h @@ -10,18 +10,19 @@ class IndexOrdered : public IndexUnordered { using ref_type = typename IndexUnordered::ref_type; using key_type = typename IndexUnordered::key_type; - IndexOrdered(const IndexDef &idef, PayloadType payloadType, const FieldsSet &fields) - : IndexUnordered(idef, std::move(payloadType), fields) {} + IndexOrdered(const IndexDef &idef, PayloadType &&payloadType, FieldsSet &&fields, const NamespaceCacheConfigData &cacheCfg) + : IndexUnordered(idef, std::move(payloadType), std::move(fields), cacheCfg) {} SelectKeyResults SelectKey(const VariantArray &keys, CondType condition, SortType stype, Index::SelectOpts opts, const BaseFunctionCtx::Ptr &ctx, const RdxContext &) override; Variant Upsert(const Variant &key, IdType id, bool &clearCache) override; void MakeSortOrders(UpdateSortedContext &ctx) override; IndexIterator::Ptr CreateIterator() const override; - std::unique_ptr Clone() const override { return std::unique_ptr{new IndexOrdered(*this)}; } + std::unique_ptr Clone() const override { return std::make_unique>(*this); } bool IsOrdered() const noexcept override { return true; } }; -std::unique_ptr IndexOrdered_New(const IndexDef &idef, PayloadType payloadType, const FieldsSet &fields); +std::unique_ptr IndexOrdered_New(const IndexDef &idef, PayloadType &&payloadType, FieldsSet &&fields, + const NamespaceCacheConfigData &cacheCfg); } // namespace reindexer diff --git a/cpp_src/core/index/indexstore.cc b/cpp_src/core/index/indexstore.cc index 27fe6239f..df95f979c 100644 --- a/cpp_src/core/index/indexstore.cc +++ b/cpp_src/core/index/indexstore.cc @@ -7,8 +7,8 @@ namespace reindexer { template <> -IndexStore::IndexStore(const IndexDef &idef, PayloadType payloadType, const FieldsSet &fields) - : Index(idef, std::move(payloadType), fields) { +IndexStore::IndexStore(const IndexDef &idef, PayloadType &&payloadType, FieldsSet &&fields) + : Index(idef, std::move(payloadType), std::move(fields)) { keyType_ = selectKeyType_ = KeyValueType::Double{}; opts_.Array(true); } @@ -68,8 +68,8 @@ Variant IndexStore::Upsert(const Variant &key, IdType /*id*/, bool template Variant IndexStore::Upsert(const Variant &key, IdType id, bool & /*clearCache*/) { - if (!opts_.IsArray() && !opts_.IsDense() && !opts_.IsSparse() && key.Type().Is()) { - idx_data.resize(std::max(id + 1, int(idx_data.size()))); + if (!opts_.IsArray() && !opts_.IsDense() && !opts_.IsSparse() && !key.Type().Is()) { + idx_data.resize(std::max(id + 1, IdType(idx_data.size()))); idx_data[id] = static_cast(key); } return Variant(key); @@ -106,43 +106,8 @@ SelectKeyResults IndexStore::SelectKey(const VariantArray &keys, CondType con if (condition == CondAny && !this->opts_.IsArray() && !this->opts_.IsSparse() && !sopts.distinct) throw Error(errParams, "The 'NOT NULL' condition is suported only by 'sparse' or 'array' indexes"); - // TODO: it may be necessary to remove or change this switch after QueryEntry refactoring - switch (condition) { - case CondAny: - if (!this->opts_.IsArray() && !this->opts_.IsSparse() && !sopts.distinct) { - throw Error(errParams, "The 'NOT NULL' condition is suported only by 'sparse' or 'array' indexes"); - } - break; 
- case CondEmpty: - if (!this->opts_.IsArray() && !this->opts_.IsSparse()) { - throw Error(errParams, "The 'is NULL' condition is suported only by 'sparse' or 'array' indexes"); - } - break; - case CondAllSet: - case CondSet: - case CondEq: - break; - case CondRange: - case CondDWithin: - if (keys.size() != 2) { - throw Error(errParams, "For condition %s required exactly 2 arguments, but provided %d", CondTypeToStr(condition), - keys.size()); - } - break; - case CondLt: - case CondLe: - case CondGt: - case CondGe: - case CondLike: - if (keys.size() != 1) { - throw Error(errParams, "For condition %s required exactly 1 argument, but provided %d", CondTypeToStr(condition), - keys.size()); - } - break; - } - - res.comparators_.push_back(Comparator(condition, KeyType(), keys, opts_.IsArray(), sopts.distinct, payloadType_, fields_, - idx_data.size() ? idx_data.data() : nullptr, opts_.collateOpts_)); + res.comparators_.emplace_back(condition, KeyType(), keys, opts_.IsArray(), bool(sopts.distinct), payloadType_, Fields(), + idx_data.size() ? idx_data.data() : nullptr, opts_.collateOpts_); return SelectKeyResults(std::move(res)); } @@ -183,20 +148,20 @@ void IndexStore::AddDestroyTask(tsl::detail_sparse_hash::ThreadTaskQueue &q) (void)q; } -std::unique_ptr IndexStore_New(const IndexDef &idef, PayloadType payloadType, const FieldsSet &fields) { +std::unique_ptr IndexStore_New(const IndexDef &idef, PayloadType &&payloadType, FieldsSet &&fields) { switch (idef.Type()) { case IndexBool: - return std::unique_ptr{new IndexStore(idef, std::move(payloadType), fields)}; + return std::make_unique>(idef, std::move(payloadType), std::move(fields)); case IndexIntStore: - return std::unique_ptr{new IndexStore(idef, std::move(payloadType), fields)}; + return std::make_unique>(idef, std::move(payloadType), std::move(fields)); case IndexInt64Store: - return std::unique_ptr{new IndexStore(idef, std::move(payloadType), fields)}; + return std::make_unique>(idef, std::move(payloadType), std::move(fields)); case IndexDoubleStore: - return std::unique_ptr{new IndexStore(idef, std::move(payloadType), fields)}; + return std::make_unique>(idef, std::move(payloadType), std::move(fields)); case IndexStrStore: - return std::unique_ptr{new IndexStore(idef, std::move(payloadType), fields)}; + return std::make_unique>(idef, std::move(payloadType), std::move(fields)); case IndexUuidStore: - return std::unique_ptr{new IndexStore(idef, std::move(payloadType), fields)}; + return std::make_unique>(idef, std::move(payloadType), std::move(fields)); case IndexStrHash: case IndexStrBTree: case IndexIntBTree: diff --git a/cpp_src/core/index/indexstore.h b/cpp_src/core/index/indexstore.h index 832c11a4f..6c3a1b4f8 100644 --- a/cpp_src/core/index/indexstore.h +++ b/cpp_src/core/index/indexstore.h @@ -8,7 +8,8 @@ namespace reindexer { template class IndexStore : public Index { public: - IndexStore(const IndexDef &idef, PayloadType payloadType, const FieldsSet &fields) : Index(idef, std::move(payloadType), fields) { + IndexStore(const IndexDef &idef, PayloadType &&payloadType, FieldsSet &&fields) + : Index(idef, std::move(payloadType), std::move(fields)) { static T a; keyType_ = selectKeyType_ = Variant(a).Type(); } @@ -21,13 +22,14 @@ class IndexStore : public Index { const BaseFunctionCtx::Ptr &ctx, const RdxContext &) override; void Commit() override; void UpdateSortedIds(const UpdateSortedContext & /*ctx*/) override {} - std::unique_ptr Clone() const override { return std::unique_ptr{new IndexStore(*this)}; } + std::unique_ptr Clone() 
const override { return std::make_unique>(*this); } IndexMemStat GetMemStat(const RdxContext &) override; bool HoldsStrings() const noexcept override { return std::is_same_v || std::is_same_v; } void Dump(std::ostream &os, std::string_view step = " ", std::string_view offset = "") const override { dump(os, step, offset); } virtual void AddDestroyTask(tsl::detail_sparse_hash::ThreadTaskQueue &) override; virtual bool IsDestroyPartSupported() const noexcept override { return true; } virtual bool IsUuid() const noexcept override final { return std::is_same_v; } + virtual void ReconfigureCache(const NamespaceCacheConfigData &) override {} template struct HasAddTask : std::false_type {}; @@ -46,8 +48,8 @@ class IndexStore : public Index { }; template <> -IndexStore::IndexStore(const IndexDef &, PayloadType, const FieldsSet &); +IndexStore::IndexStore(const IndexDef &, PayloadType &&, FieldsSet &&); -std::unique_ptr IndexStore_New(const IndexDef &idef, PayloadType payloadType, const FieldsSet &fields_); +std::unique_ptr IndexStore_New(const IndexDef &idef, PayloadType &&payloadType, FieldsSet &&); } // namespace reindexer diff --git a/cpp_src/core/index/indextext/fastindextext.cc b/cpp_src/core/index/indextext/fastindextext.cc index 36db2dc49..12cbdcae9 100644 --- a/cpp_src/core/index/indextext/fastindextext.cc +++ b/cpp_src/core/index/indextext/fastindextext.cc @@ -125,7 +125,7 @@ IndexMemStat FastIndexText::GetMemStat(const RdxContext &ctx) { contexted_shared_lock lck(this->mtx_, &ctx); ret.fulltextSize = this->holder_->GetMemStat(); - if (this->cache_ft_) ret.idsetCache = this->cache_ft_->GetMemStat(); + ret.idsetCache = this->cache_ft_ ? this->cache_ft_->GetMemStat() : LRUCacheMemStat(); return ret; } @@ -135,7 +135,7 @@ IdSet::Ptr FastIndexText::Select(FtCtx::Ptr fctx, FtDSLQuery &&dsl, bool inTr fctx->GetData()->extraWordSymbols_ = this->getConfig()->extraWordSymbols; fctx->GetData()->isWordPositions_ = true; - auto mergeData = this->holder_->Select(std::move(dsl), this->fields_.size(), fctx->NeedArea(), getConfig()->maxAreasInDoc, + auto mergeData = this->holder_->Select(std::move(dsl), this->Fields().size(), fctx->NeedArea(), getConfig()->maxAreasInDoc, inTransaction, std::move(statuses.statuses), useExternSt, rdxCtx); // convert vids(uniq documents id) to ids (real ids) IdSet::Ptr mergedIds = make_intrusive>(); @@ -232,7 +232,7 @@ void FastIndexText::commitFulltextImpl() { } auto tm1 = high_resolution_clock::now(); - this->holder_->Process(this->fields_.size(), !this->opts_.IsDense()); + this->holder_->Process(this->Fields().size(), !this->opts_.IsDense()); if (this->holder_->NeedClear(this->tracker_.isCompleteUpdated())) { this->tracker_.clear(); } @@ -373,12 +373,15 @@ reindexer::FtPreselectT FastIndexText::FtPreselect(const RdxContext &rdxCtx) std::vector(holder_->rowId2Vdoc_.size(), false), &holder_->rowId2Vdoc_}; } -std::unique_ptr FastIndexText_New(const IndexDef &idef, PayloadType payloadType, const FieldsSet &fields) { +std::unique_ptr FastIndexText_New(const IndexDef &idef, PayloadType &&payloadType, FieldsSet &&fields, + const NamespaceCacheConfigData &cacheCfg) { switch (idef.Type()) { case IndexFastFT: - return std::unique_ptr{new FastIndexText>(idef, std::move(payloadType), fields)}; + return std::make_unique>>(idef, std::move(payloadType), std::move(fields), + cacheCfg); case IndexCompositeFastFT: - return std::unique_ptr{new FastIndexText>(idef, std::move(payloadType), fields)}; + return std::make_unique>>(idef, std::move(payloadType), std::move(fields), + cacheCfg); case 
IndexStrHash: case IndexStrBTree: case IndexIntBTree: diff --git a/cpp_src/core/index/indextext/fastindextext.h b/cpp_src/core/index/indextext/fastindextext.h index 5a6ebb50d..1a2bb913a 100644 --- a/cpp_src/core/index/indextext/fastindextext.h +++ b/cpp_src/core/index/indextext/fastindextext.h @@ -2,8 +2,6 @@ #include "core/ft/config/ftfastconfig.h" #include "core/ft/ft_fast/dataholder.h" -#include "core/ft/ft_fast/dataprocessor.h" -#include "core/ft/typos.h" #include "indextext.h" namespace reindexer { @@ -22,10 +20,11 @@ class FastIndexText : public IndexText { this->CommitFulltext(); } - FastIndexText(const IndexDef& idef, PayloadType payloadType, const FieldsSet& fields) : Base(idef, std::move(payloadType), fields) { + FastIndexText(const IndexDef& idef, PayloadType&& payloadType, FieldsSet&& fields, const NamespaceCacheConfigData& cacheCfg) + : Base(idef, std::move(payloadType), std::move(fields), cacheCfg) { initConfig(); } - std::unique_ptr Clone() const override { return std::unique_ptr{new FastIndexText(*this)}; } + std::unique_ptr Clone() const override { return std::make_unique>(*this); } IdSet::Ptr Select(FtCtx::Ptr fctx, FtDSLQuery&& dsl, bool inTransaction, FtMergeStatuses&&, FtUseExternStatuses, const RdxContext&) override final; IndexMemStat GetMemStat(const RdxContext&) override final; @@ -40,7 +39,7 @@ class FastIndexText : public IndexText { reindexer::FtPreselectT FtPreselect(const RdxContext& rdxCtx) override final; bool EnablePreselectBeforeFt() const override final { return getConfig()->enablePreselectBeforeFt; } -protected: +private: void commitFulltextImpl() override final; FtFastConfig* getConfig() const noexcept { return dynamic_cast(this->cfg_.get()); } void initConfig(const FtFastConfig* = nullptr); @@ -53,6 +52,7 @@ class FastIndexText : public IndexText { std::unique_ptr holder_; }; -std::unique_ptr FastIndexText_New(const IndexDef& idef, PayloadType payloadType, const FieldsSet& fields); +std::unique_ptr FastIndexText_New(const IndexDef& idef, PayloadType&& payloadType, FieldsSet&& fields, + const NamespaceCacheConfigData& cacheCfg); } // namespace reindexer diff --git a/cpp_src/core/index/indextext/fieldsgetter.h b/cpp_src/core/index/indextext/fieldsgetter.h index 791932b27..3af05f79f 100644 --- a/cpp_src/core/index/indextext/fieldsgetter.h +++ b/cpp_src/core/index/indextext/fieldsgetter.h @@ -1,6 +1,5 @@ #pragma once #include "core/ft/usingcontainer.h" -#include "core/index/payload_map.h" #include "core/payload/fieldsset.h" #include "vendor/utf8cpp/utf8.h" diff --git a/cpp_src/core/index/indextext/fuzzyindextext.cc b/cpp_src/core/index/indextext/fuzzyindextext.cc index 339ac1252..1e2344cf0 100644 --- a/cpp_src/core/index/indextext/fuzzyindextext.cc +++ b/cpp_src/core/index/indextext/fuzzyindextext.cc @@ -56,7 +56,7 @@ void FuzzyIndexText::commitFulltextImpl() { } template -void FuzzyIndexText::CreateConfig(const FtFuzzyConfig* cfg) { +void FuzzyIndexText::createConfig(const FtFuzzyConfig* cfg) { if (cfg) { this->cfg_.reset(new FtFuzzyConfig(*cfg)); return; @@ -65,13 +65,15 @@ void FuzzyIndexText::CreateConfig(const FtFuzzyConfig* cfg) { this->cfg_->parse(this->opts_.config, this->ftFields_); } -std::unique_ptr FuzzyIndexText_New(const IndexDef& idef, PayloadType payloadType, const FieldsSet& fields) { +std::unique_ptr FuzzyIndexText_New(const IndexDef& idef, PayloadType&& payloadType, FieldsSet&& fields, + const NamespaceCacheConfigData& cacheCfg) { switch (idef.Type()) { case IndexFuzzyFT: - return std::unique_ptr{new FuzzyIndexText>(idef, 
std::move(payloadType), fields)}; + return std::make_unique>>(idef, std::move(payloadType), std::move(fields), + cacheCfg); case IndexCompositeFuzzyFT: - return std::unique_ptr{ - new FuzzyIndexText>(idef, std::move(payloadType), fields)}; + return std::make_unique>>(idef, std::move(payloadType), + std::move(fields), cacheCfg); case IndexStrHash: case IndexStrBTree: case IndexIntBTree: diff --git a/cpp_src/core/index/indextext/fuzzyindextext.h b/cpp_src/core/index/indextext/fuzzyindextext.h index d1a94780a..0c5d90846 100644 --- a/cpp_src/core/index/indextext/fuzzyindextext.h +++ b/cpp_src/core/index/indextext/fuzzyindextext.h @@ -1,9 +1,6 @@ #pragma once -#include "core/ft/config/ftfastconfig.h" #include "core/ft/ft_fuzzy/searchengine.h" -#include "core/ft/ftsetcashe.h" -#include "core/ft/idrelset.h" #include "indextext.h" namespace reindexer { @@ -13,10 +10,11 @@ class FuzzyIndexText : public IndexText { using Base = IndexText; public: - FuzzyIndexText(const FuzzyIndexText& other) : Base(other) { CreateConfig(other.getConfig()); } + FuzzyIndexText(const FuzzyIndexText& other) : Base(other) { createConfig(other.getConfig()); } - FuzzyIndexText(const IndexDef& idef, PayloadType payloadType, const FieldsSet& fields) : Base(idef, std::move(payloadType), fields) { - CreateConfig(); + FuzzyIndexText(const IndexDef& idef, PayloadType&& payloadType, FieldsSet&& fields, const NamespaceCacheConfigData& cacheCfg) + : Base(idef, std::move(payloadType), std::move(fields), cacheCfg) { + createConfig(); } SelectKeyResults SelectKey(const VariantArray& /*keys*/, CondType, Index::SelectOpts, const BaseFunctionCtx::Ptr&, FtPreselectT&&, @@ -24,7 +22,7 @@ class FuzzyIndexText : public IndexText { assertrx(0); abort(); } - std::unique_ptr Clone() const override final { return std::unique_ptr{new FuzzyIndexText(*this)}; } + std::unique_ptr Clone() const override final { return std::make_unique>(*this); } IdSet::Ptr Select(FtCtx::Ptr fctx, FtDSLQuery&& dsl, bool inTransaction, FtMergeStatuses&&, FtUseExternStatuses, const RdxContext&) override final; Variant Upsert(const Variant& key, IdType id, bool& clearCache) override final { @@ -43,12 +41,13 @@ class FuzzyIndexText : public IndexText { protected: void commitFulltextImpl() override final; FtFuzzyConfig* getConfig() const noexcept { return dynamic_cast(this->cfg_.get()); } - void CreateConfig(const FtFuzzyConfig* cfg = nullptr); + void createConfig(const FtFuzzyConfig* cfg = nullptr); search_engine::SearchEngine engine_; std::vector vdocs_; }; -std::unique_ptr FuzzyIndexText_New(const IndexDef& idef, PayloadType payloadType, const FieldsSet& fields); +std::unique_ptr FuzzyIndexText_New(const IndexDef& idef, PayloadType&& payloadType, FieldsSet&& fields, + const NamespaceCacheConfigData& cacheCfg); } // namespace reindexer diff --git a/cpp_src/core/index/indextext/indextext.cc b/cpp_src/core/index/indextext/indextext.cc index 4a8a646db..4ef8d42ee 100644 --- a/cpp_src/core/index/indextext/indextext.cc +++ b/cpp_src/core/index/indextext/indextext.cc @@ -9,7 +9,11 @@ namespace reindexer { template -IndexText::IndexText(const IndexText &other) : IndexUnordered(other), cache_ft_(std::make_shared()) { +IndexText::IndexText(const IndexText &other) + : IndexUnordered(other), + cache_ft_(std::make_unique(other.cacheMaxSize_, other.hitsToCache_)), + cacheMaxSize_(other.cacheMaxSize_), + hitsToCache_(other.hitsToCache_) { initSearchers(); } // Generic implemetation for string index @@ -19,16 +23,17 @@ void IndexText::initSearchers() { size_t jsonPathIdx = 0; if 
(this->payloadType_) { - for (unsigned i = 0; i < this->fields_.size(); i++) { - auto fieldIdx = this->fields_[i]; + const auto &fields = this->Fields(); + for (unsigned i = 0, s = fields.size(); i < s; i++) { + auto fieldIdx = fields[i]; if (fieldIdx == IndexValueType::SetByJsonPath) { - assertrx(jsonPathIdx < this->fields_.getJsonPathsLength()); - ftFields_.emplace(this->fields_.getJsonPath(jsonPathIdx++), i); + assertrx(jsonPathIdx < fields.getJsonPathsLength()); + ftFields_.emplace(fields.getJsonPath(jsonPathIdx++), i); } else { ftFields_.emplace(this->payloadType_->Field(fieldIdx).Name(), i); } } - if rx_unlikely (ftFields_.size() != this->fields_.size()) { + if rx_unlikely (ftFields_.size() != fields.size()) { throw Error(errParams, "Composite fulltext index '%s' contains duplicated fields", this->name_); } if rx_unlikely (ftFields_.size() > kMaxFtCompositeFields) { @@ -55,6 +60,18 @@ void IndexText::SetOpts(const IndexOpts &opts) { } } +template +void IndexText::ReconfigureCache(const NamespaceCacheConfigData &cacheCfg) { + if (cacheMaxSize_ != cacheCfg.ftIdxCacheSize || hitsToCache_ != cacheCfg.ftIdxHitsToCache) { + cacheMaxSize_ = cacheCfg.ftIdxCacheSize; + hitsToCache_ = cacheCfg.ftIdxHitsToCache; + if (cache_ft_) { + cache_ft_ = std::make_unique(cacheMaxSize_, hitsToCache_); + } + } + Base::ReconfigureCache(cacheCfg); +} + template FtCtx::Ptr IndexText::prepareFtCtx(const BaseFunctionCtx::Ptr &ctx) { FtCtx::Ptr ftctx = reindexer::reinterpret_pointer_cast(ctx); @@ -178,7 +195,7 @@ SelectKeyResults IndexText::SelectKey(const VariantArray &keys, CondType cond template FieldsGetter IndexText::Getter() { - return FieldsGetter(this->fields_, this->payloadType_, this->KeyType()); + return FieldsGetter(this->Fields(), this->payloadType_, this->KeyType()); } template class IndexText>; diff --git a/cpp_src/core/index/indextext/indextext.h b/cpp_src/core/index/indextext/indextext.h index d09c61a26..b1458227d 100644 --- a/cpp_src/core/index/indextext/indextext.h +++ b/cpp_src/core/index/indextext/indextext.h @@ -8,7 +8,6 @@ #include "core/ft/ftsetcashe.h" #include "core/index/indexunordered.h" #include "core/selectfunc/ctx/ftctx.h" -#include "estl/fast_hash_map.h" #include "estl/shared_mutex.h" #include "fieldsgetter.h" @@ -20,8 +19,11 @@ class IndexText : public IndexUnordered { public: IndexText(const IndexText& other); - IndexText(const IndexDef& idef, PayloadType payloadType, const FieldsSet& fields) - : IndexUnordered(idef, std::move(payloadType), fields), cache_ft_(std::make_shared()) { + IndexText(const IndexDef& idef, PayloadType&& payloadType, FieldsSet&& fields, const NamespaceCacheConfigData& cacheCfg) + : IndexUnordered(idef, std::move(payloadType), std::move(fields), cacheCfg), + cache_ft_(std::make_unique(cacheCfg.ftIdxCacheSize, cacheCfg.ftIdxHitsToCache)), + cacheMaxSize_(cacheCfg.ftIdxCacheSize), + hitsToCache_(cacheCfg.ftIdxHitsToCache) { this->selectKeyType_ = KeyValueType::String{}; initSearchers(); } @@ -39,7 +41,7 @@ class IndexText : public IndexUnordered { // Rebuild will be done on first select } void CommitFulltext() override final { - cache_ft_ = std::make_shared(); + cache_ft_ = std::make_unique(cacheMaxSize_, hitsToCache_); commitFulltextImpl(); this->isBuilt_ = true; } @@ -51,7 +53,8 @@ class IndexText : public IndexUnordered { } void ClearCache(const std::bitset& s) override { Base::ClearCache(s); } void MarkBuilt() noexcept override { assertrx(0); } - bool IsFulltext() const noexcept override { return true; } + bool IsFulltext() const noexcept override final 
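Note: the ReconfigureCache() added here rebuilds the full-text LRU cache only when the configured size or hit threshold actually differs from the stored values. A standalone sketch of that guard, with a hypothetical TinyCache/CacheSettings pair standing in for the real cache and NamespaceCacheConfigData.

#include <cstddef>
#include <cstdint>
#include <memory>

// Hypothetical cache type; the real code uses the full-text idset cache.
class TinyCache {
public:
    TinyCache(size_t maxSize, uint32_t hitsToCache) : maxSize_(maxSize), hitsToCache_(hitsToCache) {}
private:
    size_t maxSize_;
    uint32_t hitsToCache_;
};

struct CacheSettings {
    size_t cacheSize = 128 * 1024;
    uint32_t hitsToCache = 2;
};

class CachedIndex {
public:
    explicit CachedIndex(const CacheSettings& cfg)
        : cacheMaxSize_(cfg.cacheSize),
          hitsToCache_(cfg.hitsToCache),
          cache_(std::make_unique<TinyCache>(cacheMaxSize_, hitsToCache_)) {}

    // Recreate the cache only when the limits really changed; otherwise keep
    // the warm cache untouched - the same guard used by ReconfigureCache().
    void ReconfigureCache(const CacheSettings& cfg) {
        if (cacheMaxSize_ != cfg.cacheSize || hitsToCache_ != cfg.hitsToCache) {
            cacheMaxSize_ = cfg.cacheSize;
            hitsToCache_ = cfg.hitsToCache;
            if (cache_) {
                cache_ = std::make_unique<TinyCache>(cacheMaxSize_, hitsToCache_);
            }
        }
    }

private:
    size_t cacheMaxSize_;
    uint32_t hitsToCache_;
    std::unique_ptr<TinyCache> cache_;
};

int main() {
    CachedIndex idx(CacheSettings{});
    idx.ReconfigureCache(CacheSettings{256 * 1024, 4});  // limits changed: cache rebuilt
    idx.ReconfigureCache(CacheSettings{256 * 1024, 4});  // unchanged: no-op
}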
{ return true; } + void ReconfigureCache(const NamespaceCacheConfigData& cacheCfg) override final; protected: using Mutex = MarkedMutex; @@ -66,7 +69,9 @@ class IndexText : public IndexUnordered { void initSearchers(); FieldsGetter Getter(); - std::shared_ptr cache_ft_; + std::unique_ptr cache_ft_; + size_t cacheMaxSize_; + uint32_t hitsToCache_; RHashMap ftFields_; std::unique_ptr cfg_; diff --git a/cpp_src/core/index/indexunordered.cc b/cpp_src/core/index/indexunordered.cc index b311fe25f..06af94780 100644 --- a/cpp_src/core/index/indexunordered.cc +++ b/cpp_src/core/index/indexunordered.cc @@ -3,7 +3,6 @@ #include "core/index/payload_map.h" #include "core/index/string_map.h" #include "core/indexdef.h" -#include "core/keyvalue/uuid.h" #include "core/rdxcontext.h" #include "rtree/greenesplitter.h" #include "rtree/linearsplitter.h" @@ -18,55 +17,95 @@ namespace reindexer { constexpr int kMaxIdsForDistinct = 500; template -IndexUnordered::IndexUnordered(const IndexDef &idef, PayloadType payloadType, const FieldsSet &fields) - : Base(idef, std::move(payloadType), fields), idx_map() { +IndexUnordered::IndexUnordered(const IndexDef &idef, PayloadType &&payloadType, FieldsSet &&fields, + const NamespaceCacheConfigData &cacheCfg) + : Base(idef, std::move(payloadType), std::move(fields)), + idx_map(), + cacheMaxSize_(cacheCfg.idxIdsetCacheSize), + hitsToCache_(cacheCfg.idxIdsetHitsToCache) { static_assert(!(is_str_map_v || is_payload_map_v)); } template <> -IndexUnordered>::IndexUnordered(const IndexDef &idef, PayloadType payloadType, const FieldsSet &fields) - : Base(idef, std::move(payloadType), fields), idx_map(idef.opts_.collateOpts_) {} +IndexUnordered>::IndexUnordered(const IndexDef &idef, PayloadType &&payloadType, FieldsSet &&fields, + const NamespaceCacheConfigData &cacheCfg) + : Base(idef, std::move(payloadType), std::move(fields)), + idx_map(idef.opts_.collateOpts_), + cacheMaxSize_(cacheCfg.idxIdsetCacheSize), + hitsToCache_(cacheCfg.idxIdsetHitsToCache) {} template <> -IndexUnordered>::IndexUnordered(const IndexDef &idef, PayloadType payloadType, const FieldsSet &fields) - : Base(idef, std::move(payloadType), fields), idx_map(idef.opts_.collateOpts_) {} +IndexUnordered>::IndexUnordered(const IndexDef &idef, PayloadType &&payloadType, FieldsSet &&fields, + const NamespaceCacheConfigData &cacheCfg) + : Base(idef, std::move(payloadType), std::move(fields)), + idx_map(idef.opts_.collateOpts_), + cacheMaxSize_(cacheCfg.idxIdsetCacheSize), + hitsToCache_(cacheCfg.idxIdsetHitsToCache) {} template <> -IndexUnordered>::IndexUnordered(const IndexDef &idef, PayloadType payloadType, const FieldsSet &fields) - : Base(idef, std::move(payloadType), fields), idx_map(idef.opts_.collateOpts_) {} +IndexUnordered>::IndexUnordered(const IndexDef &idef, PayloadType &&payloadType, FieldsSet &&fields, + const NamespaceCacheConfigData &cacheCfg) + : Base(idef, std::move(payloadType), std::move(fields)), + idx_map(idef.opts_.collateOpts_), + cacheMaxSize_(cacheCfg.idxIdsetCacheSize), + hitsToCache_(cacheCfg.idxIdsetHitsToCache) {} template <> -IndexUnordered>::IndexUnordered(const IndexDef &idef, PayloadType payloadType, - const FieldsSet &fields) - : Base(idef, std::move(payloadType), fields), idx_map(idef.opts_.collateOpts_) {} +IndexUnordered>::IndexUnordered(const IndexDef &idef, PayloadType &&payloadType, FieldsSet &&fields, + const NamespaceCacheConfigData &cacheCfg) + : Base(idef, std::move(payloadType), std::move(fields)), + idx_map(idef.opts_.collateOpts_), + 
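Note: indexunordered.cc repeats the new cache-aware constructor once per map type through explicit `template <>` specializations. A compact sketch of specializing just the constructor of a class template while sharing the rest of the class; Container and the map types here are illustrative only.

#include <map>
#include <string>

template <typename MapT>
class Container {
public:
    Container();  // defined per specialization below
private:
    MapT map_;
    std::string note_;
};

// Generic definition used by most instantiations.
template <typename MapT>
Container<MapT>::Container() : map_(), note_("generic") {}

// Explicit specialization: only this constructor differs, the rest of the
// class template is shared - the same shape as the specialized ctors above.
template <>
Container<std::map<std::string, int>>::Container() : map_(), note_("string map") {}

int main() {
    Container<std::map<int, int>> generic;         // uses the generic ctor
    Container<std::map<std::string, int>> strMap;  // uses the specialized ctor
    (void)generic;
    (void)strMap;
}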
cacheMaxSize_(cacheCfg.idxIdsetCacheSize), + hitsToCache_(cacheCfg.idxIdsetHitsToCache) {} template <> -IndexUnordered>::IndexUnordered(const IndexDef &idef, PayloadType payloadType, const FieldsSet &fields) - : Base(idef, std::move(payloadType), fields), idx_map(idef.opts_.collateOpts_) {} +IndexUnordered>::IndexUnordered(const IndexDef &idef, PayloadType &&payloadType, FieldsSet &&fields, + const NamespaceCacheConfigData &cacheCfg) + : Base(idef, std::move(payloadType), std::move(fields)), + idx_map(idef.opts_.collateOpts_), + cacheMaxSize_(cacheCfg.idxIdsetCacheSize), + hitsToCache_(cacheCfg.idxIdsetHitsToCache) {} template <> -IndexUnordered>::IndexUnordered(const IndexDef &idef, PayloadType payloadType, - const FieldsSet &fields) - : Base(idef, payloadType, fields), idx_map(std::move(payloadType), Base::Fields()) {} +IndexUnordered>::IndexUnordered(const IndexDef &idef, PayloadType &&payloadType, FieldsSet &&fields, + const NamespaceCacheConfigData &cacheCfg) + : Base(idef, std::move(payloadType), std::move(fields)), + idx_map(PayloadType{Base::GetPayloadType()}, FieldsSet{Base::Fields()}), + cacheMaxSize_(cacheCfg.idxIdsetCacheSize), + hitsToCache_(cacheCfg.idxIdsetHitsToCache) {} template <> -IndexUnordered>::IndexUnordered(const IndexDef &idef, PayloadType payloadType, - const FieldsSet &fields) - : Base(idef, payloadType, fields), idx_map(std::move(payloadType), Base::Fields()) {} +IndexUnordered>::IndexUnordered(const IndexDef &idef, PayloadType &&payloadType, + FieldsSet &&fields, const NamespaceCacheConfigData &cacheCfg) + : Base(idef, std::move(payloadType), std::move(fields)), + idx_map(PayloadType{Base::GetPayloadType()}, FieldsSet{Base::Fields()}), + cacheMaxSize_(cacheCfg.idxIdsetCacheSize), + hitsToCache_(cacheCfg.idxIdsetHitsToCache) {} template <> -IndexUnordered>::IndexUnordered(const IndexDef &idef, PayloadType payloadType, - const FieldsSet &fields) - : Base(idef, payloadType, fields), idx_map(std::move(payloadType), Base::Fields()) {} +IndexUnordered>::IndexUnordered(const IndexDef &idef, PayloadType &&payloadType, + FieldsSet &&fields, + const NamespaceCacheConfigData &cacheCfg) + : Base(idef, std::move(payloadType), std::move(fields)), + idx_map(PayloadType{Base::GetPayloadType()}, FieldsSet{Base::Fields()}), + cacheMaxSize_(cacheCfg.idxIdsetCacheSize), + hitsToCache_(cacheCfg.idxIdsetHitsToCache) {} template <> -IndexUnordered>::IndexUnordered(const IndexDef &idef, PayloadType payloadType, const FieldsSet &fields) - : Base(idef, payloadType, fields), idx_map(std::move(payloadType), Base::Fields()) {} +IndexUnordered>::IndexUnordered(const IndexDef &idef, PayloadType &&payloadType, FieldsSet &&fields, + const NamespaceCacheConfigData &cacheCfg) + : Base(idef, std::move(payloadType), std::move(fields)), + idx_map(PayloadType{Base::GetPayloadType()}, FieldsSet{Base::Fields()}), + cacheMaxSize_(cacheCfg.idxIdsetCacheSize), + hitsToCache_(cacheCfg.idxIdsetHitsToCache) {} template <> -IndexUnordered>::IndexUnordered(const IndexDef &idef, PayloadType payloadType, - const FieldsSet &fields) - : Base(idef, payloadType, fields), idx_map(std::move(payloadType), Base::Fields()) {} +IndexUnordered>::IndexUnordered(const IndexDef &idef, PayloadType &&payloadType, FieldsSet &&fields, + const NamespaceCacheConfigData &cacheCfg) + : Base(idef, std::move(payloadType), std::move(fields)), + idx_map(PayloadType{Base::GetPayloadType()}, FieldsSet{Base::Fields()}), + cacheMaxSize_(cacheCfg.idxIdsetCacheSize), + hitsToCache_(cacheCfg.idxIdsetHitsToCache) {} template bool 
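Note: the payload-map constructors above move payloadType/fields into the base class and then build the map member from fresh copies taken back off the base (PayloadType{Base::GetPayloadType()}, FieldsSet{Base::Fields()}), which avoids touching moved-from arguments. A minimal sketch of that ordering problem and fix, using invented Base/Derived/Fields types.

#include <string>
#include <utility>

struct Fields {
    std::string spec;
};

class Base {
public:
    explicit Base(Fields&& f) : fields_(std::move(f)) {}
    const Fields& GetFields() const noexcept { return fields_; }
private:
    Fields fields_;
};

class Derived : public Base {
public:
    // Initializing member_ from `f` here would read a moved-from object,
    // because Base already consumed it. Copying back from the base accessor
    // keeps the data intact, mirroring PayloadType{Base::GetPayloadType()}.
    explicit Derived(Fields&& f)
        : Base(std::move(f)), member_(Fields{GetFields()}) {}
private:
    Fields member_;
};

int main() {
    Derived d(Fields{"id+name"});
    (void)d;
}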
IndexUnordered::HoldsStrings() const noexcept { @@ -79,7 +118,13 @@ bool IndexUnordered::HoldsStrings() const noexcept { template IndexUnordered::IndexUnordered(const IndexUnordered &other) - : Base(other), idx_map(other.idx_map), cache_(nullptr), empty_ids_(other.empty_ids_), tracker_(other.tracker_) {} + : Base(other), + idx_map(other.idx_map), + cache_(nullptr), + cacheMaxSize_(other.cacheMaxSize_), + hitsToCache_(other.hitsToCache_), + empty_ids_(other.empty_ids_), + tracker_(other.tracker_) {} template size_t heap_size(const key_type & /*kt*/) { @@ -130,7 +175,7 @@ Variant IndexUnordered::Upsert(const Variant &key, IdType id, bool &clearCach // reset cache if (key.Type().Is()) { // TODO maybe error or default value if the index is not sparse if (this->empty_ids_.Unsorted().Add(id, IdSet::Auto, this->sortedIdxCount_)) { - if (cache_) cache_.reset(); + cache_.reset(); clearCache = true; this->isBuilt_ = false; } @@ -146,7 +191,7 @@ Variant IndexUnordered::Upsert(const Variant &key, IdType id, bool &clearCach } if (keyIt->second.Unsorted().Add(id, this->opts_.IsPK() ? IdSet::Ordered : IdSet::Auto, this->sortedIdxCount_)) { - if (cache_) cache_.reset(); + cache_.reset(); clearCache = true; this->isBuilt_ = false; } @@ -168,7 +213,7 @@ void IndexUnordered::Delete(const Variant &key, IdType id, StringsHolder &str delcnt = this->empty_ids_.Unsorted().Erase(id); assertrx(delcnt); this->isBuilt_ = false; - if (cache_) cache_.reset(); + cache_.reset(); clearCache = true; return; } @@ -180,12 +225,12 @@ void IndexUnordered::Delete(const Variant &key, IdType id, StringsHolder &str delcnt = keyIt->second.Unsorted().Erase(id); (void)delcnt; this->isBuilt_ = false; - if (cache_) cache_.reset(); + cache_.reset(); clearCache = true; // TODO: we have to implement removal of composite indexes (doesn't work right now) assertf(this->opts_.IsArray() || this->Opts().IsSparse() || delcnt, "Delete unexists id from index '%s' id=%d,key=%s (%s)", this->name_, - id, key.As(this->payloadType_, this->fields_), - Variant(keyIt->first).As(this->payloadType_, this->fields_)); + id, key.As(this->payloadType_, this->Fields()), + Variant(keyIt->first).As(this->payloadType_, this->Fields())); if (keyIt->second.Unsorted().IsEmpty()) { this->tracker_.markDeleted(keyIt); @@ -345,7 +390,7 @@ template void IndexUnordered::Commit() { this->empty_ids_.Unsorted().Commit(); - if (!cache_) cache_.reset(new IdSetCache()); + if (!cache_) cache_.reset(new IdSetCache(cacheMaxSize_, hitsToCache_)); if (!tracker_.isUpdated()) return; @@ -433,18 +478,33 @@ void IndexUnordered::AddDestroyTask(tsl::detail_sparse_hash::ThreadTaskQueue (void)q; } +template +void IndexUnordered::ReconfigureCache(const NamespaceCacheConfigData &cacheCfg) { + if (cacheMaxSize_ != cacheCfg.idxIdsetCacheSize || hitsToCache_ != cacheCfg.idxIdsetHitsToCache) { + cacheMaxSize_ = cacheCfg.idxIdsetCacheSize; + hitsToCache_ = cacheCfg.idxIdsetHitsToCache; + if (cache_) { + cache_.reset(new IdSetCache(cacheMaxSize_, hitsToCache_)); + } + } +} + template -static std::unique_ptr IndexUnordered_New(const IndexDef &idef, PayloadType payloadType, const FieldsSet &fields) { +static std::unique_ptr IndexUnordered_New(const IndexDef &idef, PayloadType &&payloadType, FieldsSet &&fields, + const NamespaceCacheConfigData &cacheCfg) { switch (idef.Type()) { case IndexIntHash: - return std::unique_ptr{new IndexUnordered>(idef, std::move(payloadType), fields)}; + return std::make_unique>>(idef, std::move(payloadType), std::move(fields), + cacheCfg); case IndexInt64Hash: - return 
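Note: Upsert()/Delete() drop the `if (cache_)` guard because reset() on an empty smart pointer is already a no-op, and Commit() now allocates the cache lazily with the stored limits. A sketch under those assumptions; IdCache and TrackedIndex are placeholder names.

#include <cstddef>
#include <cstdint>
#include <memory>

class IdCache {
public:
    IdCache(size_t maxSize, uint32_t hitsToCache) : maxSize_(maxSize), hitsToCache_(hitsToCache) {}
private:
    size_t maxSize_;
    uint32_t hitsToCache_;
};

class TrackedIndex {
public:
    TrackedIndex(size_t cacheMaxSize, uint32_t hitsToCache)
        : cacheMaxSize_(cacheMaxSize), hitsToCache_(hitsToCache) {}

    void Upsert(int /*id*/) {
        // reset() is safe whether or not a cache exists, so no null check is needed.
        cache_.reset();
    }

    void Commit() {
        // Allocate the cache on demand with the currently configured limits.
        if (!cache_) cache_ = std::make_unique<IdCache>(cacheMaxSize_, hitsToCache_);
    }

private:
    size_t cacheMaxSize_;
    uint32_t hitsToCache_;
    std::unique_ptr<IdCache> cache_;
};

int main() {
    TrackedIndex idx(128 * 1024, 2);
    idx.Upsert(1);  // no cache yet: reset() is a no-op
    idx.Commit();   // cache created here with the stored limits
    idx.Upsert(2);  // cache invalidated
}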
std::unique_ptr{ - new IndexUnordered>(idef, std::move(payloadType), fields)}; + return std::make_unique>>(idef, std::move(payloadType), + std::move(fields), cacheCfg); case IndexStrHash: - return std::unique_ptr{new IndexUnordered>(idef, std::move(payloadType), fields)}; + return std::make_unique>>(idef, std::move(payloadType), std::move(fields), + cacheCfg); case IndexCompositeHash: - return std::unique_ptr{new IndexUnordered>(idef, std::move(payloadType), fields)}; + return std::make_unique>>(idef, std::move(payloadType), std::move(fields), + cacheCfg); case IndexStrBTree: case IndexIntBTree: case IndexInt64BTree: @@ -469,9 +529,11 @@ static std::unique_ptr IndexUnordered_New(const IndexDef &idef, PayloadTy } // NOLINTBEGIN(*cplusplus.NewDeleteLeaks) -std::unique_ptr IndexUnordered_New(const IndexDef &idef, PayloadType payloadType, const FieldsSet &fields) { - return (idef.opts_.IsPK() || idef.opts_.IsDense()) ? IndexUnordered_New(idef, std::move(payloadType), fields) - : IndexUnordered_New(idef, std::move(payloadType), fields); +std::unique_ptr IndexUnordered_New(const IndexDef &idef, PayloadType &&payloadType, FieldsSet &&fields, + const NamespaceCacheConfigData &cacheCfg) { + return (idef.opts_.IsPK() || idef.opts_.IsDense()) + ? IndexUnordered_New(idef, std::move(payloadType), std::move(fields), cacheCfg) + : IndexUnordered_New(idef, std::move(payloadType), std::move(fields), cacheCfg); } // NOLINTEND(*cplusplus.NewDeleteLeaks) diff --git a/cpp_src/core/index/indexunordered.h b/cpp_src/core/index/indexunordered.h index 05cc89d95..11b8fbe74 100644 --- a/cpp_src/core/index/indexunordered.h +++ b/cpp_src/core/index/indexunordered.h @@ -26,7 +26,7 @@ class IndexUnordered : public IndexStore> { typename T::key_type>::type>::type>::type; using key_type = StoreIndexKeyType; - IndexUnordered(const IndexDef &idef, PayloadType payloadType, const FieldsSet &fields); + IndexUnordered(const IndexDef &idef, PayloadType &&payloadType, FieldsSet &&fields, const NamespaceCacheConfigData &cacheCfg); IndexUnordered(const IndexUnordered &other); Variant Upsert(const Variant &key, IdType id, bool &chearCache) override; @@ -35,7 +35,7 @@ class IndexUnordered : public IndexStore> { const BaseFunctionCtx::Ptr &ctx, const RdxContext &) override; void Commit() override; void UpdateSortedIds(const UpdateSortedContext &) override; - std::unique_ptr Clone() const override { return std::unique_ptr{new IndexUnordered(*this)}; } + std::unique_ptr Clone() const override { return std::make_unique>(*this); } IndexMemStat GetMemStat(const RdxContext &) override; size_t Size() const noexcept override final { return idx_map.size(); } void SetSortedIdxCount(int sortedIdxCount) override; @@ -49,6 +49,7 @@ class IndexUnordered : public IndexStore> { void AddDestroyTask(tsl::detail_sparse_hash::ThreadTaskQueue &) override; bool IsDestroyPartSupported() const noexcept override { return true; } + void ReconfigureCache(const NamespaceCacheConfigData &cacheCfg) override; protected: bool tryIdsetCache(const VariantArray &keys, CondType condition, SortType sortId, @@ -60,6 +61,8 @@ class IndexUnordered : public IndexStore> { T idx_map; // Merged idsets cache atomic_unique_ptr cache_; + size_t cacheMaxSize_; + uint32_t hitsToCache_; // Empty ids Index::KeyEntry empty_ids_; // Tracker of updates @@ -70,8 +73,9 @@ class IndexUnordered : public IndexStore> { void dump(S &os, std::string_view step, std::string_view offset) const; }; -constexpr inline unsigned maxSelectivityPercentForIdset() noexcept { return 30u; } +constexpr unsigned 
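Note: the two-level IndexUnordered_New factory above first fixes a tracker/map flavor at compile time, then lets the runtime `IsPK() || IsDense()` check pick which instantiation to build. A hedged sketch of that dispatch; SimpleTracker, CommittedTracker and makeHashIndex are invented names, not the real tracker types.

#include <memory>

struct Options {
    bool isPK = false;
    bool isDense = false;
};

struct IndexIface {
    virtual ~IndexIface() = default;
};

// Invented tracker types standing in for the real update trackers.
struct SimpleTracker {};
struct CommittedTracker {};

template <typename Tracker>
class HashIndex final : public IndexIface {
public:
    Tracker tracker{};
};

// Inner factory: the tracker type is fixed at compile time.
template <typename Tracker>
std::unique_ptr<IndexIface> makeHashIndex() {
    return std::make_unique<HashIndex<Tracker>>();
}

// Outer factory: runtime options pick the instantiation, mirroring the
// (IsPK() || IsDense()) branch in IndexUnordered_New.
std::unique_ptr<IndexIface> makeHashIndex(const Options& opts) {
    return (opts.isPK || opts.isDense) ? makeHashIndex<CommittedTracker>()
                                       : makeHashIndex<SimpleTracker>();
}

int main() {
    auto pkIndex = makeHashIndex(Options{true, false});
    auto plainIndex = makeHashIndex(Options{});
    (void)pkIndex;
    (void)plainIndex;
}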
maxSelectivityPercentForIdset() noexcept { return 30u; } -std::unique_ptr IndexUnordered_New(const IndexDef &idef, PayloadType payloadType, const FieldsSet &fields); +std::unique_ptr IndexUnordered_New(const IndexDef &idef, PayloadType &&payloadType, FieldsSet &&fields, + const NamespaceCacheConfigData &cacheCfg); } // namespace reindexer diff --git a/cpp_src/core/index/payload_map.h b/cpp_src/core/index/payload_map.h index 91132989f..357b671a3 100644 --- a/cpp_src/core/index/payload_map.h +++ b/cpp_src/core/index/payload_map.h @@ -12,7 +12,7 @@ namespace reindexer { class PayloadValueWithHash : public PayloadValue { public: PayloadValueWithHash() noexcept : PayloadValue() {} - PayloadValueWithHash(PayloadValue pv, const PayloadType &pt, const FieldsSet &fields) + PayloadValueWithHash(PayloadValue &&pv, const PayloadType &pt, const FieldsSet &fields) : PayloadValue(std::move(pv)), hash_(ConstPayload(pt, *static_cast(this)).GetHash(fields)) {} PayloadValueWithHash(const PayloadValueWithHash &o) noexcept : PayloadValue(o), hash_(o.hash_) {} PayloadValueWithHash(PayloadValueWithHash &&o) noexcept : PayloadValue(std::move(o)), hash_(o.hash_) {} @@ -29,7 +29,8 @@ class PayloadValueWithHash : public PayloadValue { struct equal_composite { using is_transparent = void; - equal_composite(PayloadType type, const FieldsSet &fields) : type_(std::move(type)), fields_(fields) {} + template + equal_composite(PT &&type, FS &&fields) : type_(std::forward(type)), fields_(std::forward(fields)) {} bool operator()(const PayloadValue &lhs, const PayloadValue &rhs) const { assertrx(type_); return ConstPayload(type_, lhs).IsEQ(rhs, fields_); @@ -50,7 +51,8 @@ struct equal_composite { FieldsSet fields_; }; struct hash_composite { - hash_composite(PayloadType type, const FieldsSet &fields) : type_(std::move(type)), fields_(fields) {} + template + hash_composite(PT &&type, FS &&fields) : type_(std::forward(type)), fields_(std::forward(fields)) {} size_t operator()(const PayloadValueWithHash &s) const { return s.GetHash(); } size_t operator()(const PayloadValue &s) const { assertrx(type_); @@ -61,7 +63,7 @@ struct hash_composite { }; struct less_composite { - less_composite(PayloadType type, const FieldsSet &fields) : type_(std::move(type)), fields_(fields) {} + less_composite(PayloadType &&type, FieldsSet &&fields) : type_(std::move(type)), fields_(std::move(fields)) {} bool operator()(const PayloadValue &lhs, const PayloadValue &rhs) const { assertrx(type_); assertrx(!lhs.IsFree()); @@ -78,7 +80,7 @@ class payload_str_fields_helper; template <> class payload_str_fields_helper { protected: - payload_str_fields_helper(PayloadType payloadType, const FieldsSet &fields) : payload_type_(std::move(payloadType)) { + payload_str_fields_helper(PayloadType &&payloadType, const FieldsSet &fields) : payload_type_(std::move(payloadType)) { if (fields.getTagsPathsLength() || fields.getJsonPathsLength()) { str_fields_.push_back(0); } @@ -153,13 +155,13 @@ class unordered_payload_map static_assert(std::is_nothrow_move_constructible>::value, "Nothrow movebale key and value required"); - unordered_payload_map(size_t size, PayloadType pt, const FieldsSet &f) - : base_hash_map(size, hash_composite(pt, f), equal_composite(pt, f)), - payload_str_fields_helper(pt, f), + unordered_payload_map(size_t size, PayloadType &&pt, FieldsSet &&f) + : base_hash_map(size, hash_composite(PayloadType{pt}, FieldsSet{f}), equal_composite(PayloadType{pt}, FieldsSet{f})), + payload_str_fields_helper(PayloadType{pt}, f), payloadType_(std::move(pt)), - 
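Note: the hash/equality functors in payload_map.h gain templated constructors that forward either lvalues (copied) or rvalues (moved) into their members. A minimal sketch of that perfect-forwarding constructor, with made-up TypeDesc/FieldList/hash_like names.

#include <string>
#include <utility>
#include <vector>

struct TypeDesc { std::string name; };
struct FieldList { std::vector<int> ids; };

struct hash_like {
    // One constructor covers both "copy in" and "move in": lvalues deduce as
    // T&, rvalues as T, and std::forward preserves the value category.
    template <typename TD, typename FL>
    hash_like(TD&& type, FL&& fields)
        : type_(std::forward<TD>(type)), fields_(std::forward<FL>(fields)) {}

    TypeDesc type_;
    FieldList fields_;
};

int main() {
    TypeDesc td{"composite"};
    FieldList fl{{1, 3}};

    hash_like copied(td, fl);                       // copies: td/fl remain usable
    hash_like moved(std::move(td), std::move(fl));  // moves: cheap transfer
    (void)copied;
    (void)moved;
}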
fields_(f) {} + fields_(std::move(f)) {} - unordered_payload_map(PayloadType pt, const FieldsSet &f) : unordered_payload_map(1000, std::move(pt), f) {} + unordered_payload_map(PayloadType &&pt, FieldsSet &&f) : unordered_payload_map(1000, std::move(pt), std::move(f)) {} unordered_payload_map(const unordered_payload_map &other) : base_hash_map(other), payload_str_fields_helper(other), payloadType_(other.payloadType_), fields_(other.fields_) { @@ -185,7 +187,7 @@ class unordered_payload_map } template std::pair emplace(const PayloadValue &pl, V &&v) { - PayloadValueWithHash key(pl, payloadType_, fields_); + PayloadValueWithHash key(PayloadValue{pl}, payloadType_, fields_); auto res = base_hash_map::emplace(std::move(key), std::forward(v)); if (res.second) this->add_ref(res.first->first); return res; @@ -207,7 +209,7 @@ class unordered_payload_map } T1 &operator[](const PayloadValue &k) { - PayloadValueWithHash key(k, payloadType_, fields_); + PayloadValueWithHash key(PayloadValue{k}, payloadType_, fields_); return base_hash_map::operator[](std::move(key)); } T1 &operator[](PayloadValue &&k) { @@ -247,7 +249,8 @@ class payload_map : private btree::btree_map, using payload_str_fields_helper::have_str_fields; payload_map(PayloadType payloadType, const FieldsSet &fields) - : base_tree_map(less_composite(payloadType, fields)), payload_str_fields_helper(std::move(payloadType), fields) {} + : base_tree_map(less_composite(PayloadType{payloadType}, FieldsSet{fields})), + payload_str_fields_helper(std::move(payloadType), fields) {} payload_map(const payload_map &other) : base_tree_map(other), payload_str_fields_helper(other) { for (auto &item : *this) this->add_ref(const_cast(item.first)); } diff --git a/cpp_src/core/index/rtree/indexrtree.cc b/cpp_src/core/index/rtree/indexrtree.cc index 555db9012..c285a768d 100644 --- a/cpp_src/core/index/rtree/indexrtree.cc +++ b/cpp_src/core/index/rtree/indexrtree.cc @@ -78,7 +78,7 @@ void IndexRTree::Upsert(VariantArra if (keyIt->second.Unsorted().Add(id, this->opts_.IsPK() ? 
IdSet::Ordered : IdSet::Auto, this->sortedIdxCount_)) { this->isBuilt_ = false; // reset cache - if (this->cache_) this->cache_.reset(); + this->cache_.reset(); clearCache = true; } this->tracker_.markUpdated(this->idx_map, keyIt); @@ -99,7 +99,7 @@ void IndexRTree::Delete(const Varia const Point point = static_cast(keys); typename Map::iterator keyIt = this->idx_map.find(point); if (keyIt == this->idx_map.end()) return; - if (this->cache_) this->cache_.reset(); + this->cache_.reset(); clearCache = true; this->isBuilt_ = false; @@ -108,8 +108,8 @@ void IndexRTree::Delete(const Varia (void)delcnt; // TODO: we have to implement removal of composite indexes (doesn't work right now) assertf(this->Opts().IsSparse() || delcnt, "Delete unexists id from index '%s' id=%d,key=%s (%s)", this->name_, id, - Variant(keys).template As(this->payloadType_, this->fields_), - Variant(keyIt->first).As(this->payloadType_, this->fields_)); + Variant(keys).template As(this->payloadType_, this->Fields()), + Variant(keyIt->first).As(this->payloadType_, this->Fields())); if (keyIt->second.Unsorted().IsEmpty()) { this->tracker_.markDeleted(keyIt); @@ -120,41 +120,44 @@ void IndexRTree::Delete(const Varia } } -std::unique_ptr IndexRTree_New(const IndexDef &idef, PayloadType payloadType, const FieldsSet &fields) { +std::unique_ptr IndexRTree_New(const IndexDef &idef, PayloadType &&payloadType, FieldsSet &&fields, + const NamespaceCacheConfigData &cacheCfg) { switch (idef.opts_.RTreeType()) { case IndexOpts::Linear: if (idef.opts_.IsPK() || idef.opts_.IsDense()) { - return std::unique_ptr{ - new IndexRTree(idef, std::move(payloadType), fields)}; + return std::make_unique>(idef, std::move(payloadType), + std::move(fields), cacheCfg); } else { - return std::unique_ptr{new IndexRTree(idef, std::move(payloadType), fields)}; + return std::make_unique>(idef, std::move(payloadType), std::move(fields), + cacheCfg); } case IndexOpts::Quadratic: if (idef.opts_.IsPK() || idef.opts_.IsDense()) { - return std::unique_ptr{ - new IndexRTree(idef, std::move(payloadType), fields)}; - } else { - return std::unique_ptr{ - new IndexRTree(idef, std::move(payloadType), fields)}; + return std::make_unique>(idef, std::move(payloadType), + std::move(fields), cacheCfg); } + return std::make_unique>(idef, std::move(payloadType), std::move(fields), + cacheCfg); case IndexOpts::Greene: if (idef.opts_.IsPK() || idef.opts_.IsDense()) { - return std::unique_ptr{ - new IndexRTree(idef, std::move(payloadType), fields)}; + return std::make_unique>(idef, std::move(payloadType), + std::move(fields), cacheCfg); } else { - return std::unique_ptr{new IndexRTree(idef, std::move(payloadType), fields)}; + return std::make_unique>(idef, std::move(payloadType), std::move(fields), + cacheCfg); } case IndexOpts::RStar: if (idef.opts_.IsPK() || idef.opts_.IsDense()) { - return std::unique_ptr{ - new IndexRTree(idef, std::move(payloadType), fields)}; + return std::make_unique>(idef, std::move(payloadType), + std::move(fields), cacheCfg); } else { - return std::unique_ptr{new IndexRTree(idef, std::move(payloadType), fields)}; + return std::make_unique>(idef, std::move(payloadType), std::move(fields), + cacheCfg); } - default: - assertrx(0); - abort(); } + + assertrx(0); + std::abort(); } template class IndexRTree; diff --git a/cpp_src/core/index/rtree/indexrtree.h b/cpp_src/core/index/rtree/indexrtree.h index effdbd04d..9d4b23986 100644 --- a/cpp_src/core/index/rtree/indexrtree.h +++ b/cpp_src/core/index/rtree/indexrtree.h @@ -11,8 +11,8 @@ class IndexRTree : public 
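Note: IndexRTree_New (and, later in the patch, the IndexOpts stream operator) removes the `default:` branch and moves the unreachable abort after the switch; once every enumerator is handled explicitly, -Wswitch can flag any newly added enum value. A small sketch of the idiom with an illustrative enum.

#include <cstdlib>
#include <string_view>

enum class SplitterKind { Linear, Quadratic, Greene, RStar };

std::string_view Name(SplitterKind kind) noexcept {
    // No default branch: the compiler can warn if SplitterKind grows a value
    // that is not handled here. The abort() below is only reachable for a
    // corrupted enum value.
    switch (kind) {
        case SplitterKind::Linear:
            return "Linear";
        case SplitterKind::Quadratic:
            return "Quadratic";
        case SplitterKind::Greene:
            return "Greene";
        case SplitterKind::RStar:
            return "RStar";
    }
    std::abort();
}

int main() { return Name(SplitterKind::Greene) == "Greene" ? 0 : 1; }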
IndexUnordered; public: - IndexRTree(const IndexDef &idef, PayloadType payloadType, const FieldsSet &fields) - : IndexUnordered{idef, std::move(payloadType), fields} {} + IndexRTree(const IndexDef &idef, PayloadType &&payloadType, FieldsSet &&fields, const NamespaceCacheConfigData &cacheCfg) + : IndexUnordered{idef, std::move(payloadType), std::move(fields), cacheCfg} {} SelectKeyResults SelectKey(const VariantArray &keys, CondType, SortType, Index::SelectOpts, const BaseFunctionCtx::Ptr &, const RdxContext &) override; @@ -21,9 +21,10 @@ class IndexRTree : public IndexUnordered::Delete; void Delete(const VariantArray &keys, IdType id, StringsHolder &, bool &clearCache) override; - std::unique_ptr Clone() const override { return std::unique_ptr{new IndexRTree(*this)}; } + std::unique_ptr Clone() const override { return std::make_unique(*this); } }; -std::unique_ptr IndexRTree_New(const IndexDef &idef, PayloadType payloadType, const FieldsSet &fields); +std::unique_ptr IndexRTree_New(const IndexDef &idef, PayloadType &&payloadType, FieldsSet &&fields, + const NamespaceCacheConfigData &cacheCfg); } // namespace reindexer diff --git a/cpp_src/core/index/string_map.h b/cpp_src/core/index/string_map.h index 98114e386..ae60f9221 100644 --- a/cpp_src/core/index/string_map.h +++ b/cpp_src/core/index/string_map.h @@ -2,15 +2,13 @@ #include "core/keyvalue/key_string.h" #include "core/keyvalue/uuid.h" -#include "core/payload/payloadtype.h" +#include "core/namespace/stringsholder.h" #include "cpp-btree/btree_map.h" #include "sparse-map/sparse_map.h" #include "tools/stringstools.h" namespace reindexer { -class FieldsSet; - struct less_key_string { using is_transparent = void; diff --git a/cpp_src/core/index/ttlindex.cc b/cpp_src/core/index/ttlindex.cc index 11e1a8726..7fdbc1e5b 100644 --- a/cpp_src/core/index/ttlindex.cc +++ b/cpp_src/core/index/ttlindex.cc @@ -15,11 +15,13 @@ void UpdateExpireAfter(Index *i, int64_t v) { } } -std::unique_ptr TtlIndex_New(const IndexDef &idef, PayloadType payloadType, const FieldsSet &fields) { +std::unique_ptr TtlIndex_New(const IndexDef &idef, PayloadType &&payloadType, FieldsSet &&fields, + const NamespaceCacheConfigData &cacheCfg) { if (idef.opts_.IsPK() || idef.opts_.IsDense()) { - return std::unique_ptr{new TtlIndex>(idef, std::move(payloadType), fields)}; + return std::make_unique>>(idef, std::move(payloadType), std::move(fields), + cacheCfg); } - return std::unique_ptr{new TtlIndex>(idef, std::move(payloadType), fields)}; + return std::make_unique>>(idef, std::move(payloadType), std::move(fields), cacheCfg); } } // namespace reindexer diff --git a/cpp_src/core/index/ttlindex.h b/cpp_src/core/index/ttlindex.h index acc0b1a72..b59b6947c 100644 --- a/cpp_src/core/index/ttlindex.h +++ b/cpp_src/core/index/ttlindex.h @@ -10,11 +10,11 @@ namespace reindexer { template class TtlIndex : public IndexOrdered { public: - TtlIndex(const IndexDef &idef, PayloadType payloadType, const FieldsSet &fields) - : IndexOrdered(idef, std::move(payloadType), fields), expireAfter_(idef.expireAfter_) {} + TtlIndex(const IndexDef &idef, PayloadType &&payloadType, FieldsSet &&fields, const NamespaceCacheConfigData &cacheCfg) + : IndexOrdered(idef, std::move(payloadType), std::move(fields), cacheCfg), expireAfter_(idef.expireAfter_) {} TtlIndex(const TtlIndex &other) : IndexOrdered(other), expireAfter_(other.expireAfter_) {} int64_t GetTTLValue() const noexcept override { return expireAfter_; } - std::unique_ptr Clone() const override { return std::unique_ptr{new TtlIndex(*this)}; } + 
std::unique_ptr Clone() const override { return std::make_unique>(*this); } void UpdateExpireAfter(int64_t v) noexcept { expireAfter_ = v; } private: @@ -22,7 +22,8 @@ class TtlIndex : public IndexOrdered { int64_t expireAfter_ = 0; }; -std::unique_ptr TtlIndex_New(const IndexDef &idef, PayloadType payloadType, const FieldsSet &fields); +std::unique_ptr TtlIndex_New(const IndexDef &idef, PayloadType &&payloadType, FieldsSet &&fields, + const NamespaceCacheConfigData &cacheCfg); void UpdateExpireAfter(Index *i, int64_t v); } // namespace reindexer diff --git a/cpp_src/core/index/uuid_index.cc b/cpp_src/core/index/uuid_index.cc index 57ff52cb4..7622020d3 100644 --- a/cpp_src/core/index/uuid_index.cc +++ b/cpp_src/core/index/uuid_index.cc @@ -10,8 +10,9 @@ void UuidIndex::Upsert(VariantArray &result, const VariantArray &keys, IdType id } } -std::unique_ptr IndexUuid_New(const IndexDef &idef, PayloadType payloadType, const FieldsSet &fields) { - return std::unique_ptr{new UuidIndex{idef, std::move(payloadType), fields}}; +std::unique_ptr IndexUuid_New(const IndexDef &idef, PayloadType &&payloadType, FieldsSet &&fields, + const NamespaceCacheConfigData &cacheCfg) { + return std::make_unique(idef, std::move(payloadType), std::move(fields), cacheCfg); } } // namespace reindexer diff --git a/cpp_src/core/index/uuid_index.h b/cpp_src/core/index/uuid_index.h index 1352c3f43..fb2a4411d 100644 --- a/cpp_src/core/index/uuid_index.h +++ b/cpp_src/core/index/uuid_index.h @@ -1,7 +1,6 @@ #pragma once #include "core/index/indexunordered.h" -#include "core/keyvalue/uuid.h" namespace reindexer { @@ -9,12 +8,14 @@ class UuidIndex : public IndexUnordered using Base = IndexUnordered>; public: - UuidIndex(const IndexDef& idef, PayloadType payloadType, const FieldsSet& fields) : Base{idef, std::move(payloadType), fields} {} - std::unique_ptr Clone() const override { return std::unique_ptr{new UuidIndex{*this}}; } + UuidIndex(const IndexDef& idef, PayloadType&& payloadType, FieldsSet&& fields, const NamespaceCacheConfigData& cacheCfg) + : Base{idef, std::move(payloadType), std::move(fields), cacheCfg} {} + std::unique_ptr Clone() const override { return std::make_unique(*this); } using Base::Upsert; void Upsert(VariantArray& result, const VariantArray& keys, IdType id, bool& clearCache) override; // TODO delete this after #1353 }; -std::unique_ptr IndexUuid_New(const IndexDef& idef, PayloadType payloadType, const FieldsSet& fields); +std::unique_ptr IndexUuid_New(const IndexDef& idef, PayloadType&& payloadType, FieldsSet&& fields, + const NamespaceCacheConfigData& cacheCfg); } // namespace reindexer diff --git a/cpp_src/core/indexdef.cc b/cpp_src/core/indexdef.cc index 8195a2fac..9efe90eda 100644 --- a/cpp_src/core/indexdef.cc +++ b/cpp_src/core/indexdef.cc @@ -117,9 +117,9 @@ IndexDef::IndexDef(std::string name, JsonPaths jsonPaths, IndexType type, IndexO this->FromType(type); } -bool IndexDef::IsEqual(const IndexDef &other, bool skipConfig) const { +bool IndexDef::IsEqual(const IndexDef &other, IndexComparison cmpType) const { return name_ == other.name_ && jsonPaths_ == other.jsonPaths_ && Type() == other.Type() && fieldType_ == other.fieldType_ && - opts_.IsEqual(other.opts_, skipConfig) && expireAfter_ == other.expireAfter_; + opts_.IsEqual(other.opts_, cmpType) && expireAfter_ == other.expireAfter_; } IndexType IndexDef::Type() const { diff --git a/cpp_src/core/indexdef.h b/cpp_src/core/indexdef.h index a843a4993..cb4651120 100644 --- a/cpp_src/core/indexdef.h +++ b/cpp_src/core/indexdef.h @@ -26,9 +26,7 @@ 
struct IndexDef { IndexDef(std::string name, JsonPaths jsonPaths, std::string indexType, std::string fieldType, IndexOpts opts, int64_t expireAfter); IndexDef(std::string name, std::string indexType, std::string fieldType, IndexOpts opts); IndexDef(std::string name, JsonPaths jsonPaths, IndexType type, IndexOpts opts); - bool operator==(const IndexDef &other) const { return IsEqual(other, false); } - bool operator!=(const IndexDef &other) const { return !IsEqual(other, false); } - bool IsEqual(const IndexDef &other, bool skipConfig) const; + bool IsEqual(const IndexDef &other, IndexComparison cmpType) const; IndexType Type() const; std::string getCollateMode() const; const std::vector &Conditions() const; diff --git a/cpp_src/core/indexopts.cc b/cpp_src/core/indexopts.cc index 11948330f..6e8a3cafc 100644 --- a/cpp_src/core/indexopts.cc +++ b/cpp_src/core/indexopts.cc @@ -22,8 +22,9 @@ IndexOpts::IndexOpts(uint8_t flags, CollateMode mode, RTreeIndexType rtreeType) IndexOpts::IndexOpts(const std::string& sortOrderUTF8, uint8_t flags, RTreeIndexType rtreeType) : options(flags), collateOpts_(sortOrderUTF8), rtreeType_(rtreeType) {} -bool IndexOpts::IsEqual(const IndexOpts& other, bool skipConfig) const { - return options == other.options && (skipConfig || config == other.config) && collateOpts_.mode == other.collateOpts_.mode && +bool IndexOpts::IsEqual(const IndexOpts& other, IndexComparison cmpType) const noexcept { + return options == other.options && (cmpType == IndexComparison::SkipConfig || config == other.config) && + collateOpts_.mode == other.collateOpts_.mode && collateOpts_.sortOrderTable.GetSortOrderCharacters() == other.collateOpts_.sortOrderTable.GetSortOrderCharacters() && rtreeType_ == other.rtreeType_; } diff --git a/cpp_src/core/indexopts.h b/cpp_src/core/indexopts.h index 22d86ec92..6b6a8c299 100644 --- a/cpp_src/core/indexopts.h +++ b/cpp_src/core/indexopts.h @@ -12,6 +12,8 @@ struct CollateOpts { void Dump(T& os) const; }; +enum class IndexComparison { WithConfig, SkipConfig }; + /// Cpp version of IndexOpts: includes /// sort order table which is not possible /// to link in C-GO version because of templates @@ -37,8 +39,7 @@ struct IndexOpts { IndexOpts& SetConfig(const std::string& config); CollateMode GetCollateMode() const noexcept; - bool operator==(const IndexOpts& other) const { return IsEqual(other, false); } - bool IsEqual(const IndexOpts& other, bool skipConfig) const; + bool IsEqual(const IndexOpts& other, IndexComparison cmpType) const noexcept; template void Dump(T& os) const; @@ -60,7 +61,6 @@ T& operator<<(T& os, IndexOpts::RTreeIndexType t) { return os << "Greene"; case IndexOpts::RStar: return os << "RStar"; - default: - abort(); } + std::abort(); } diff --git a/cpp_src/core/item.cc b/cpp_src/core/item.cc index 7840e7853..5ec196050 100644 --- a/cpp_src/core/item.cc +++ b/cpp_src/core/item.cc @@ -3,7 +3,6 @@ #include "core/itemimpl.h" #include "core/keyvalue/p_string.h" #include "core/namespace/namespace.h" -#include "core/rdxcontext.h" #include "tools/catch_and_return.h" namespace reindexer { diff --git a/cpp_src/core/itemimpl.cc b/cpp_src/core/itemimpl.cc index eb8555094..f8622df5c 100644 --- a/cpp_src/core/itemimpl.cc +++ b/cpp_src/core/itemimpl.cc @@ -158,10 +158,22 @@ void ItemImpl::FromCJSON(std::string_view slice, bool pkOnly, Recoder *recoder) Serializer rdser(data); Payload pl = GetPayload(); - CJsonDecoder decoder(tagsMatcher_, pkOnly ? 
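Note: IsEqual() replaces its bool skipConfig flag with the two-value IndexComparison enum, so call sites read unambiguously. A hedged sketch of the same refactor on a toy settings type (CompareMode/Settings are illustrative names).

#include <string>

enum class CompareMode { WithConfig, SkipConfig };

struct Settings {
    int flags = 0;
    std::string config;

    // `IsEqual(other, CompareMode::SkipConfig)` is self-describing at the
    // call site, unlike the old `IsEqual(other, true)`.
    bool IsEqual(const Settings& other, CompareMode mode) const noexcept {
        return flags == other.flags &&
               (mode == CompareMode::SkipConfig || config == other.config);
    }
};

int main() {
    Settings a{1, "{\"a\":1}"}, b{1, "{\"a\":2}"};
    bool sameIgnoringConfig = a.IsEqual(b, CompareMode::SkipConfig);  // true
    bool sameWithConfig = a.IsEqual(b, CompareMode::WithConfig);      // false
    return (sameIgnoringConfig && !sameWithConfig) ? 0 : 1;
}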
&pkFields_ : nullptr, recoder); + CJsonDecoder decoder(tagsMatcher_); + ser_.Reset(); ser_.PutUInt32(0); - decoder.Decode(pl, rdser, ser_); + if (pkOnly && !pkFields_.empty()) { + if rx_unlikely (recoder) { + throw Error(errParams, "ItemImpl::FromCJSON: pkOnly mode is not compatible with non-null recoder"); + } + decoder.Decode(pl, rdser, ser_, CJsonDecoder::RestrictingFilter(pkFields_)); + } else { + if (recoder) { + decoder.Decode(pl, rdser, ser_, CJsonDecoder::DummyFilter(), CJsonDecoder::DefaultRecoder(*recoder)); + } else { + decoder.Decode<>(pl, rdser, ser_); + } + } if (!rdser.Eof()) throw Error(errParseJson, "Internal error - left unparsed data %d", rdser.Pos()); @@ -208,7 +220,7 @@ Error ItemImpl::FromJSON(std::string_view slice, char **endp, bool pkOnly) { } // Split parsed json into indexes and tuple - JsonDecoder decoder(tagsMatcher_, pkOnly ? &pkFields_ : nullptr); + JsonDecoder decoder(tagsMatcher_, pkOnly && !pkFields_.empty() ? &pkFields_ : nullptr); Payload pl = GetPayload(); ser_.Reset(); diff --git a/cpp_src/core/itemimpl.h b/cpp_src/core/itemimpl.h index 2b98aea81..484be7ef1 100644 --- a/cpp_src/core/itemimpl.h +++ b/cpp_src/core/itemimpl.h @@ -98,6 +98,7 @@ class ItemImpl : public ItemImplRawData { std::shared_ptr GetSchema() const noexcept { return schema_; } TagsMatcher &tagsMatcher() noexcept { return tagsMatcher_; } + std::shared_ptr &schema() noexcept { return schema_; } void SetPrecepts(const std::vector &precepts) { precepts_ = precepts; diff --git a/cpp_src/core/itemmodifier.cc b/cpp_src/core/itemmodifier.cc index fb2cfa52c..5f46fc0be 100644 --- a/cpp_src/core/itemmodifier.cc +++ b/cpp_src/core/itemmodifier.cc @@ -1,9 +1,9 @@ #include "itemmodifier.h" +#include "core/itemimpl.h" #include "core/namespace/namespaceimpl.h" #include "core/query/expressionevaluator.h" #include "core/selectfunc/functionexecutor.h" #include "index/index.h" -#include "tools/logger.h" namespace reindexer { diff --git a/cpp_src/core/joincache.h b/cpp_src/core/joincache.h index 8ec4030f2..974373e37 100644 --- a/cpp_src/core/joincache.h +++ b/cpp_src/core/joincache.h @@ -54,14 +54,8 @@ struct JoinCacheVal { bool inited = false; std::shared_ptr preResult; }; -typedef LRUCache MainLruCache; -class JoinCache : public MainLruCache { -public: - JoinCache() : MainLruCache(kDefaultCacheSizeLimit * 2, 2) {} - - typedef std::shared_ptr Ptr; -}; +using JoinCache = LRUCache; struct JoinCacheRes { bool haveData = false; diff --git a/cpp_src/core/key_value_type.h b/cpp_src/core/key_value_type.h index 1f44c8d73..31c58a873 100644 --- a/cpp_src/core/key_value_type.h +++ b/cpp_src/core/key_value_type.h @@ -228,7 +228,7 @@ class KeyValueType { return v.value_ == value_; } [[nodiscard]] bool IsSame(KeyValueType other) const noexcept { return value_ == other.value_; } - [[nodiscard]] TagType ToTagType() const { + [[nodiscard]] TagType ToTagType() const noexcept { switch (value_) { case KVT::Int64: case KVT::Int: diff --git a/cpp_src/core/keyvalue/variant.cc b/cpp_src/core/keyvalue/variant.cc index 71d80b12e..19faf5114 100644 --- a/cpp_src/core/keyvalue/variant.cc +++ b/cpp_src/core/keyvalue/variant.cc @@ -559,8 +559,14 @@ Variant Variant::convert(KeyValueType type, const PayloadType *payloadType, cons Variant &Variant::convert(KeyValueType type, const PayloadType *payloadType, const FieldsSet *fields) & { if (isUuid()) { type.EvaluateOneOf([&](KeyValueType::Uuid) noexcept {}, [&](KeyValueType::String) { *this = Variant{std::string{Uuid{*this}}}; }, + [&](KeyValueType::Composite) { + assertrx_throw(payloadType 
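Note: FromCJSON now routes the decode through different filter policies and explicitly rejects the pkOnly + recoder combination. A heavily simplified sketch of that dispatch; FilterAll, FilterPk and FromCJsonLike are invented names and do not reflect the real CJsonDecoder API.

#include <stdexcept>
#include <string>
#include <vector>

struct Field { std::string name; bool isPk = false; };

// Invented policy types: the real code uses decoder-specific filters and a recoder.
struct FilterAll { bool operator()(const Field&) const noexcept { return true; } };
struct FilterPk  { bool operator()(const Field& f) const noexcept { return f.isPk; } };

template <typename Filter>
std::vector<Field> decode(const std::vector<Field>& in, Filter filter) {
    std::vector<Field> out;
    for (const auto& f : in) {
        if (filter(f)) out.push_back(f);
    }
    return out;
}

std::vector<Field> FromCJsonLike(const std::vector<Field>& in, bool pkOnly, bool hasRecoder) {
    if (pkOnly) {
        // Mirrors the new guard: pk-only decoding and a recoder cannot be combined.
        if (hasRecoder) throw std::invalid_argument("pkOnly mode is not compatible with a recoder");
        return decode(in, FilterPk{});
    }
    return decode(in, FilterAll{});
}

int main() {
    std::vector<Field> fields{{"id", true}, {"name", false}};
    auto pkOnly = FromCJsonLike(fields, true, false);  // keeps only "id"
    return pkOnly.size() == 1 ? 0 : 1;
}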
&& fields); + Variant tmp{VariantArray{std::move(*this)}}; + tmp.convertToComposite(*payloadType, *fields); + *this = std::move(tmp); + }, [type](OneOf) { + KeyValueType::Tuple, KeyValueType::Undefined, KeyValueType::Null>) { throw Error(errParams, "Can't convert Variant from type '%s' to type '%s'", KeyValueType{KeyValueType::Uuid{}}.Name(), type.Name()); }); @@ -572,12 +578,22 @@ Variant &Variant::convert(KeyValueType type, const PayloadType *payloadType, con [&](KeyValueType::Int64) { *this = Variant(As()); }, [&](KeyValueType::Double) { *this = Variant(As()); }, [&](KeyValueType::String) { *this = Variant(As()); }, [&](KeyValueType::Composite) { - if (variant_.type.Is()) { - assertrx(payloadType && fields); - convertToComposite(payloadType, fields); - } else { - throw Error(errParams, "Can't convert Variant from type '%s' to type '%s'", variant_.type.Name(), type.Name()); - } + variant_.type.EvaluateOneOf( + [&](KeyValueType::Tuple) { + assertrx(payloadType && fields); + convertToComposite(*payloadType, *fields); + }, + [](KeyValueType::Composite) noexcept {}, + [&](OneOf) { + assertrx(payloadType && fields); + Variant tmp{VariantArray{std::move(*this)}}; + tmp.convertToComposite(*payloadType, *fields); + *this = std::move(tmp); + }, + [&](OneOf) { + throw Error(errParams, "Can't convert Variant from type '%s' to type '%s'", variant_.type.Name(), type.Name()); + }); }, [&](KeyValueType::Uuid) { *this = Variant{As()}; }, [&](OneOf) { @@ -586,7 +602,7 @@ Variant &Variant::convert(KeyValueType type, const PayloadType *payloadType, con return *this; } -void Variant::convertToComposite(const PayloadType *payloadType, const FieldsSet *fields) { +void Variant::convertToComposite(const PayloadType &payloadType, const FieldsSet &fields) { assertrx(!isUuid()); assertrx(variant_.type.Is() && variant_.hold == 1); key_string val = *cast(); @@ -594,24 +610,24 @@ void Variant::convertToComposite(const PayloadType *payloadType, const FieldsSet if (variant_.hold == 1) free(); // Alloc usual payloadvalue + extra memory for hold string - auto &pv = *new (cast()) PayloadValue(payloadType->TotalSize() + val->size()); + auto &pv = *new (cast()) PayloadValue(payloadType.TotalSize() + val->size()); variant_.hold = 1; variant_.type = KeyValueType::Composite{}; // Copy serializer buffer with strings to extra payloadvalue memory - char *data = reinterpret_cast(pv.Ptr() + payloadType->TotalSize()); + char *data = reinterpret_cast(pv.Ptr() + payloadType.TotalSize()); memcpy(data, val->data(), val->size()); Serializer ser(std::string_view(data, val->size())); size_t count = ser.GetVarUint(); - if (count != fields->size()) { - throw Error(errLogic, "Invalid count of arguments for composite index, expected %d, got %d", fields->size(), count); + if (count != fields.size()) { + throw Error(errLogic, "Invalid count of arguments for composite index, expected %d, got %d", fields.size(), count); } - Payload pl(*payloadType, pv); + Payload pl(payloadType, pv); - for (auto field : *fields) { + for (auto field : fields) { if (field != IndexValueType::SetByJsonPath) { pl.Set(field, ser.GetVariant()); } else { diff --git a/cpp_src/core/keyvalue/variant.h b/cpp_src/core/keyvalue/variant.h index a4296aacb..f4d495587 100644 --- a/cpp_src/core/keyvalue/variant.h +++ b/cpp_src/core/keyvalue/variant.h @@ -144,7 +144,7 @@ class Variant { private: bool isUuid() const noexcept { return uuid_.isUuid != 0; } - void convertToComposite(const PayloadType *, const FieldsSet *); + void convertToComposite(const PayloadType &, const FieldsSet 
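Note: convertToComposite() switches its PayloadType/FieldsSet parameters from pointers to references, since callers already assert non-null before the call. A tiny sketch of that contract change with an illustrative TypeInfo type.

#include <cassert>
#include <string>

struct TypeInfo { std::string name; };

// Before: the null state was representable and had to be checked inside.
std::string describeOld(const TypeInfo* type) {
    assert(type);
    return "type=" + type->name;
}

// After: a reference makes "must not be null" part of the signature, so the
// check stays at the one place that still deals with pointers (the caller).
std::string describeNew(const TypeInfo& type) { return "type=" + type.name; }

int main() {
    TypeInfo ti{"composite"};
    const TypeInfo* maybe = &ti;
    assert(maybe);  // caller-side check, as in Variant::convert
    return describeNew(*maybe) == describeOld(maybe) ? 0 : 1;
}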
&); void free() noexcept; void copy(const Variant &other); template @@ -212,6 +212,8 @@ template <> std::string Variant::As() const; class VariantArray : public h_vector { + using Base = h_vector; + public: VariantArray() noexcept = default; explicit VariantArray(Point) noexcept; @@ -225,9 +227,14 @@ class VariantArray : public h_vector { return std::move(*this); } void MarkObject() noexcept { isObjectValue = true; } - using h_vector::h_vector; - using h_vector::operator==; - using h_vector::operator!=; + using Base::Base; + using Base::operator==; + using Base::operator!=; + template + void clear() noexcept { + isArrayValue = isObjectValue = false; + Base::clear(); + } size_t Hash() const noexcept { size_t ret = this->size(); for (size_t i = 0; i < this->size(); ++i) ret = (ret * 127) ^ this->at(i).Hash(); diff --git a/cpp_src/core/lrucache.cc b/cpp_src/core/lrucache.cc index ecfc29ac8..9ab9316c4 100644 --- a/cpp_src/core/lrucache.cc +++ b/cpp_src/core/lrucache.cc @@ -9,7 +9,7 @@ namespace reindexer { -const int kMaxHitCountToCache = 1024; +constexpr uint32_t kMaxHitCountToCache = 1024; template typename LRUCache::Iterator LRUCache::Get(const K &key) { @@ -29,7 +29,7 @@ typename LRUCache::Iterator LRUCache::Get( it->second.lruPos = std::prev(lru_.end()); } - if (++it->second.hitCount < hitCountToCache_) { + if (++it->second.hitCount < int(hitCountToCache_)) { return Iterator(); } ++getCount_; @@ -57,10 +57,10 @@ void LRUCache::Put(const K &key, V &&v) { eraseLRU(); if rx_unlikely (putCount_ * 16 > getCount_ && eraseCount_) { - logPrintf(LogWarning, "IdSetCache::eraseLRU () cache invalidates too fast eraseCount=%d,putCount=%d,getCount=%d", eraseCount_, - putCount_, eraseCount_); + logPrintf(LogWarning, "IdSetCache::eraseLRU () cache invalidates too fast eraseCount=%d,putCount=%d,getCount=%d,hitCountToCache=%d", + eraseCount_, putCount_, eraseCount_, hitCountToCache_); eraseCount_ = 0; - hitCountToCache_ = std::min(hitCountToCache_ * 2, kMaxHitCountToCache); + hitCountToCache_ = hitCountToCache_ ? 
std::min(hitCountToCache_ * 2, kMaxHitCountToCache) : 2; putCount_ = 0; getCount_ = 0; } @@ -128,7 +128,7 @@ LRUCacheMemStat LRUCache::GetMemStat() { } template class LRUCache; template class LRUCache; -template class LRUCache; +template class LRUCache; template class LRUCache; } // namespace reindexer diff --git a/cpp_src/core/lrucache.h b/cpp_src/core/lrucache.h index e81fa741f..4cac5b979 100644 --- a/cpp_src/core/lrucache.h +++ b/cpp_src/core/lrucache.h @@ -4,20 +4,18 @@ #include #include #include +#include "dbconfig.h" #include "namespace/namespacestat.h" namespace reindexer { -constexpr size_t kDefaultCacheSizeLimit = 1024 * 1024 * 128; -constexpr int kDefaultHitCountToCache = 2; constexpr size_t kElemSizeOverhead = 256; template class LRUCache { public: using Key = K; - LRUCache(size_t sizeLimit = kDefaultCacheSizeLimit, int hitCount = kDefaultHitCountToCache) noexcept - : totalCacheSize_(0), cacheSizeLimit_(sizeLimit), hitCountToCache_(hitCount) {} + LRUCache(size_t sizeLimit, uint32_t hitCount) noexcept : totalCacheSize_(0), cacheSizeLimit_(sizeLimit), hitCountToCache_(hitCount) {} struct Iterator { Iterator(bool k = false, const V &v = V()) : valid(k), val(v) {} Iterator(const Iterator &other) = delete; @@ -118,7 +116,7 @@ class LRUCache { mutable std::mutex lock_; size_t totalCacheSize_; const size_t cacheSizeLimit_; - int hitCountToCache_; + uint32_t hitCountToCache_; uint64_t getCount_ = 0, putCount_ = 0, eraseCount_ = 0; }; diff --git a/cpp_src/core/namespace/asyncstorage.cc b/cpp_src/core/namespace/asyncstorage.cc index 4f2aa4091..d94d0b08c 100644 --- a/cpp_src/core/namespace/asyncstorage.cc +++ b/cpp_src/core/namespace/asyncstorage.cc @@ -52,7 +52,6 @@ void AsyncStorage::Destroy() { throwOnStorageCopy(); if (storage_) { - tryReopenStorage(); clearUpdates(); storage_->Destroy(path_); reset(); diff --git a/cpp_src/core/namespace/itemsloader.h b/cpp_src/core/namespace/itemsloader.h index 0432a9310..3d39c27c8 100644 --- a/cpp_src/core/namespace/itemsloader.h +++ b/cpp_src/core/namespace/itemsloader.h @@ -1,6 +1,7 @@ #pragma once #include +#include "core/itemimpl.h" #include "namespaceimpl.h" namespace reindexer { diff --git a/cpp_src/core/namespace/namespace.cc b/cpp_src/core/namespace/namespace.cc index 3bc79b243..733bb309e 100644 --- a/cpp_src/core/namespace/namespace.cc +++ b/cpp_src/core/namespace/namespace.cc @@ -35,6 +35,8 @@ void Namespace::CommitTransaction(Transaction& tx, QueryResults& result, const R CounterGuardAIR32 cg(nsl->cancelCommitCnt_); try { auto rlck = statCalculator.CreateLock(*nsl, &NamespaceImpl::rLock, ctx); + tx.ValidatePK(nsl->pkFields()); + auto storageLock = statCalculator.CreateLock(nsl->storage_, &AsyncStorage::FullLock); cg.Reset(); @@ -105,10 +107,14 @@ bool Namespace::needNamespaceCopy(const NamespaceImpl::Ptr& ns, const Transactio void Namespace::doRename(const Namespace::Ptr& dst, const std::string& newName, const std::string& storagePath, const RdxContext& ctx) { std::string dbpath; const auto flushOpts = StorageFlushOpts().WithImmediateReopen(); - awaitMainNs(ctx)->storage_.Flush(flushOpts); auto lck = handleInvalidation(NamespaceImpl::wLock)(ctx); - auto& srcNs = *atomicLoadMainNs(); // -V758 - srcNs.storage_.Flush(flushOpts); // Repeat flush, to raise any disk errors before attempt to close storage + auto srcNsPtr = atomicLoadMainNs(); + auto& srcNs = *srcNsPtr; + srcNs.storage_.Flush(flushOpts); // Repeat flush, to raise any disk errors before attempt to close storage + auto storageStatus = srcNs.storage_.GetStatusCached(); + if 
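Note: when the LRU cache invalidates far faster than it is read, the patch doubles the hit threshold (clamped to kMaxHitCountToCache, with a zero threshold bumped to 2) and restarts the counters. A compact sketch of that back-off; CacheStats and maybeBackOff are made-up names.

#include <algorithm>
#include <cstdint>

constexpr uint32_t kMaxHitThreshold = 1024;

struct CacheStats {
    uint64_t puts = 0;
    uint64_t gets = 0;
    uint64_t erases = 0;
    uint32_t hitThreshold = 2;  // an entry must be requested this many times before it is cached
};

// Called after an eviction pass: if entries are written ~16x more often than
// they are read back, caching them this eagerly is wasted work, so require
// more hits before caching and reset the measurement window.
void maybeBackOff(CacheStats& s) {
    if (s.puts * 16 > s.gets && s.erases) {
        s.hitThreshold = s.hitThreshold ? std::min(s.hitThreshold * 2, kMaxHitThreshold) : 2;
        s.puts = 0;
        s.gets = 0;
        s.erases = 0;
    }
}

int main() {
    CacheStats s{100, 10, 5, 2};
    maybeBackOff(s);  // threshold doubles to 4, counters reset
    return s.hitThreshold == 4 ? 0 : 1;
}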
(!storageStatus.err.ok()) { + throw Error(storageStatus.err.code(), "Unable to flush storage before rename: %s", storageStatus.err.what()); + } NamespaceImpl::Mutex* dstMtx = nullptr; NamespaceImpl::Ptr dstNs; if (dst) { diff --git a/cpp_src/core/namespace/namespace.h b/cpp_src/core/namespace/namespace.h index 71864740d..55dc59548 100644 --- a/cpp_src/core/namespace/namespace.h +++ b/cpp_src/core/namespace/namespace.h @@ -131,7 +131,12 @@ class Namespace { } void OnConfigUpdated(DBConfigProvider &configProvider, const RdxContext &ctx) { NamespaceConfigData configData; - configProvider.GetNamespaceConfig(GetName(ctx), configData); + const auto nsName = GetName(ctx); + std::string_view realNsName(nsName); + if (isTmpNamespaceNameFast(nsName)) { + realNsName = demangleTmpNamespaceName(realNsName); + } + configProvider.GetNamespaceConfig(realNsName, configData); startCopyPolicyTxSize_.store(configData.startCopyPolicyTxSize, std::memory_order_relaxed); copyPolicyMultiplier_.store(configData.copyPolicyMultiplier, std::memory_order_relaxed); txSizeToAlwaysCopy_.store(configData.txSizeToAlwaysCopy, std::memory_order_relaxed); @@ -211,7 +216,7 @@ class Namespace { ns->tryForceFlush(std::move(locker)); } else if constexpr (std::is_same_v) { auto params = longUpdDelLoggingParams_.load(std::memory_order_relaxed); - const bool isEnabled = params.thresholdUs >= 0 && !isSystemNamespaceNameFast(v._namespace); + const bool isEnabled = params.thresholdUs >= 0 && !isSystemNamespaceNameFast(v.NsName()); auto statCalculator = QueryStatCalculator(long_actions::MakeLogger(v, std::move(params)), isEnabled); auto locker = statCalculator.CreateLock(*ns, &NamespaceImpl::wLock, ctx.rdxContext); calc.LockHit(); diff --git a/cpp_src/core/namespace/namespaceimpl.cc b/cpp_src/core/namespace/namespaceimpl.cc index 7848af01e..9c1f0d9f7 100644 --- a/cpp_src/core/namespace/namespaceimpl.cc +++ b/cpp_src/core/namespace/namespaceimpl.cc @@ -10,6 +10,7 @@ #include "core/index/ttlindex.h" #include "core/itemimpl.h" #include "core/itemmodifier.h" +#include "core/nsselecter/crashqueryreporter.h" #include "core/nsselecter/nsselecter.h" #include "core/payload/payloadiface.h" #include "core/querystat.h" @@ -72,14 +73,15 @@ NamespaceImpl::NamespaceImpl(const NamespaceImpl& src, AsyncStorage::FullLockT& storage_{src.storage_, storageLock}, replStateUpdates_{src.replStateUpdates_.load()}, meta_{src.meta_}, - queryTotalCountCache_{std::make_shared()}, sparseIndexesCount_{src.sparseIndexesCount_}, krefs{src.krefs}, skrefs{src.skrefs}, sysRecordsVersions_{src.sysRecordsVersions_}, - joinCache_{std::make_shared()}, enablePerfCounters_{src.enablePerfCounters_.load()}, config_{src.config_}, + queryCountCache_{ + std::make_unique(config_.cacheConfig.queryCountCacheSize, config_.cacheConfig.queryCountHitsToCache)}, + joinCache_{std::make_unique(config_.cacheConfig.joinCacheSize, config_.cacheConfig.joinHitsToCache)}, wal_{src.wal_, storage_}, repl_{src.repl_}, observers_{src.observers_}, @@ -109,9 +111,10 @@ NamespaceImpl::NamespaceImpl(const std::string& name, UpdatesObservers& observer name_(name), payloadType_(name), tagsMatcher_(payloadType_), - queryTotalCountCache_(std::make_shared()), - joinCache_(std::make_shared()), enablePerfCounters_(false), + queryCountCache_( + std::make_unique(config_.cacheConfig.queryCountCacheSize, config_.cacheConfig.queryCountHitsToCache)), + joinCache_(std::make_unique(config_.cacheConfig.joinCacheSize, config_.cacheConfig.joinHitsToCache)), wal_(config_.walSize), observers_(&observers), lastSelectTime_{0}, 
@@ -241,6 +244,9 @@ void NamespaceImpl::OnConfigUpdated(DBConfigProvider& configProvider, const RdxC config_.optimizationSortWorkers, configData.optimizationSortWorkers, config_.optimizationTimeout, configData.optimizationTimeout); } + const bool needReconfigureIdxCache = !config_.cacheConfig.IsIndexesCacheEqual(configData.cacheConfig); + const bool needReconfigureJoinCache = !config_.cacheConfig.IsJoinCacheEqual(configData.cacheConfig); + const bool needReconfigureQueryCountCache = !config_.cacheConfig.IsQueryCountCacheEqual(configData.cacheConfig); config_ = configData; storageOpts_.LazyLoad(configData.lazyLoad); storageOpts_.noQueryIdleThresholdSec = configData.noQueryIdleThreshold; @@ -249,6 +255,27 @@ void NamespaceImpl::OnConfigUpdated(DBConfigProvider& configProvider, const RdxC for (auto& idx : indexes_) { idx->EnableUpdatesCountingMode(configData.idxUpdatesCountingMode); } + if (needReconfigureIdxCache) { + for (auto& idx : indexes_) { + idx->ReconfigureCache(config_.cacheConfig); + } + logPrintf(LogTrace, + "[%s] Indexes cache has been reconfigured. IdSets cache (for each index): { max_size %lu KB; hits: %u }. FullTextIdSets " + "cache (for each ft-index): { max_size %lu KB; hits: %u }", + name_, config_.cacheConfig.idxIdsetCacheSize / 1024, config_.cacheConfig.idxIdsetHitsToCache, + config_.cacheConfig.ftIdxCacheSize / 1024, config_.cacheConfig.ftIdxHitsToCache); + } + if (needReconfigureJoinCache) { + joinCache_ = std::make_unique(config_.cacheConfig.joinCacheSize, config_.cacheConfig.joinHitsToCache); + logPrintf(LogTrace, "[%s] Join cache has been reconfigured: { max_size %lu KB; hits: %u }", name_, + config_.cacheConfig.joinCacheSize / 1024, config_.cacheConfig.joinHitsToCache); + } + if (needReconfigureQueryCountCache) { + queryCountCache_ = + std::make_unique(config_.cacheConfig.queryCountCacheSize, config_.cacheConfig.queryCountHitsToCache); + logPrintf(LogTrace, "[%s] Queries count cache has been reconfigured: { max_size %lu KB; hits: %u }", name_, + config_.cacheConfig.queryCountCacheSize / 1024, config_.cacheConfig.queryCountHitsToCache); + } if (needReoptimizeIndexes) { updateSortedIdxCount(); @@ -386,8 +413,8 @@ NamespaceImpl::RollBack_recreateCompositeIndexes NamespaceImpl::re indexDef.opts_ = index->Opts(); indexDef.FromType(index->Type()); - createFieldsSet>(indexDef.name_, index->Type(), index->Fields(), fields); - auto newIndex{Index::New(indexDef, payloadType_, fields)}; + createCompositeFieldsSet>(indexDef.name_, index->Fields(), fields); + auto newIndex{Index::New(indexDef, PayloadType{payloadType_}, FieldsSet{fields}, config_.cacheConfig)}; rollbacker.SaveIndex(std::move(index)); std::swap(index, newIndex); @@ -425,7 +452,7 @@ class NamespaceImpl::RollBack_updateItems : private RollBackBase { } rollbacker_recreateCompositeIndexes_.RollBack(); for (auto& idx : ns_.indexes_) { - idx->UpdatePayloadType(ns_.payloadType_); + idx->UpdatePayloadType(PayloadType{ns_.payloadType_}); } Disable(); } @@ -478,7 +505,7 @@ NamespaceImpl::RollBack_updateItems NamespaceImpl::updateItems(con repl_.dataHash, itemsDataSize_}; for (auto& idx : indexes_) { - idx->UpdatePayloadType(payloadType_); + idx->UpdatePayloadType(PayloadType{payloadType_}); } VariantArray skrefsDel, skrefsUps; @@ -588,9 +615,10 @@ void NamespaceImpl::AddIndex(const IndexDef& indexDef, const RdxContext& ctx) { auto wlck = wLock(ctx); - addIndex(indexDef); - saveIndexesToStorage(); - addToWAL(indexDef, WalIndexAdd, ctx); + if (addIndex(indexDef) || ctx.fromReplication_) { + saveIndexesToStorage(); + 
addToWAL(indexDef, WalIndexAdd, ctx); + } } void NamespaceImpl::DumpIndex(std::ostream& os, std::string_view index, const RdxContext& ctx) const { @@ -600,9 +628,10 @@ void NamespaceImpl::DumpIndex(std::ostream& os, std::string_view index, const Rd void NamespaceImpl::UpdateIndex(const IndexDef& indexDef, const RdxContext& ctx) { auto wlck = wLock(ctx); - updateIndex(indexDef); - saveIndexesToStorage(); - addToWAL(indexDef, WalIndexUpdate, ctx); + if (updateIndex(indexDef) || ctx.fromReplication_) { + saveIndexesToStorage(); + addToWAL(indexDef, WalIndexUpdate, ctx); + } } void NamespaceImpl::DropIndex(const IndexDef& indexDef, const RdxContext& ctx) { @@ -614,6 +643,20 @@ void NamespaceImpl::DropIndex(const IndexDef& indexDef, const RdxContext& ctx) { void NamespaceImpl::SetSchema(std::string_view schema, const RdxContext& ctx) { auto wlck = wLock(ctx); + + if (!ctx.fromReplication_) { + if (schema_ && schema_->GetJSON() == Schema::AppendProtobufNumber(schema, schema_->GetProtobufNsNumber())) { + return; + } + if (repl_.slaveMode) { + logPrintf(LogWarning, + "[repl:%s]:%d Attempt to set new JSON-schema for the replicated namespace via user interface, which does not " + "correspond to the current schema. New schema was ignored to avoid force syncs", + name_, serverId_); + return; + } + } + schema_ = std::make_shared(schema); auto fields = schema_->GetPaths(); for (auto& field : fields) { @@ -745,22 +788,28 @@ void NamespaceImpl::verifyCompositeIndex(const IndexDef& indexDef) const { throw Error{errParams, "Composite index cannot be sparse. Use non-sparse composite instead"}; } for (const auto& jp : indexDef.jsonPaths_) { - const auto it = indexesNames_.find(jp); - if (it == indexesNames_.end()) { + int idx; + if (!getIndexByName(jp, idx)) { if (!IsFullText(indexDef.Type())) { throw Error(errParams, "Composite indexes over non-indexed field ('%s') are not supported yet (except for full-text indexes). 
Create " "at least column index('-') over each field inside the composite index", jp); } - continue; - } - const auto& idx = indexes_[it->second]; - if (idx->IsUuid() && type != IndexCompositeHash) { - throw Error{errParams, "Only hash index allowed on UUID field"}; - } - if (IsComposite(idx->Type())) { - throw Error(errParams, "Cannot create composite index '%s' over the other composite '%s'", indexDef.name_, idx->Name()); + } else { + const auto& index = *indexes_[idx]; + if (index.Opts().IsSparse()) { + throw Error(errParams, "Composite indexes over sparse indexed field ('%s') are not supported yet", jp); + } + if (type != IndexCompositeHash && index.IsUuid()) { + throw Error{errParams, "Only hash index allowed on UUID field"}; + } + if (index.Opts().IsArray() && !IsFullText(type)) { + throw Error(errParams, "Cannot add array subindex '%s' to not fulltext composite index '%s'", jp, indexDef.name_); + } + if (IsComposite(index.Type())) { + throw Error(errParams, "Cannot create composite index '%s' over the other composite '%s'", indexDef.name_, index.Name()); + } } } } @@ -846,13 +895,13 @@ void NamespaceImpl::verifyUpdateIndex(const IndexDef& indexDef) const { return; } - const auto newIndex = std::unique_ptr(Index::New(indexDef, PayloadType(), FieldsSet())); + const auto newIndex = std::unique_ptr(Index::New(indexDef, PayloadType(), FieldsSet(), config_.cacheConfig)); if (indexDef.opts_.IsSparse()) { if (indexDef.jsonPaths_.size() != 1) { throw Error(errParams, "Sparse index must have exactly 1 JSON-path, but %d paths found for '%s'", indexDef.jsonPaths_.size(), indexDef.name_); } - const auto newSparseIndex = std::unique_ptr(Index::New(indexDef, payloadType_, {})); + const auto newSparseIndex = std::unique_ptr(Index::New(indexDef, PayloadType{payloadType_}, {}, config_.cacheConfig)); } else { FieldsSet changedFields{idxNameIt->second}; PayloadType newPlType = payloadType_; @@ -972,23 +1021,20 @@ class NamespaceImpl::RollBack_addIndex : private RollBackBase { bool needResetPayloadTypeInTagsMatcher_{false}; }; -void NamespaceImpl::addIndex(const IndexDef& indexDef) { +bool NamespaceImpl::addIndex(const IndexDef& indexDef) { const auto& indexName = indexDef.name_; if (const auto idxNameIt = indexesNames_.find(indexName); idxNameIt != indexesNames_.end()) { - IndexDef newIndexDef = indexDef; IndexDef oldIndexDef = getIndexDefinition(indexName); - // reset config - oldIndexDef.opts_.config = ""; - newIndexDef.opts_.config = ""; - if (newIndexDef == oldIndexDef) { - return; + if (indexDef.IsEqual(oldIndexDef, IndexComparison::SkipConfig)) { + return false; } else { if (oldIndexDef.Type() == IndexTtl) { + IndexDef newIndexDef = indexDef; oldIndexDef.expireAfter_ = newIndexDef.expireAfter_; - if (oldIndexDef == newIndexDef) { + if (oldIndexDef.IsEqual(newIndexDef, IndexComparison::SkipConfig)) { auto indx = indexes_[idxNameIt->second].get(); UpdateExpireAfter(indx, newIndexDef.expireAfter_); - return; + return true; } } throw Error(errConflict, "Index '%s.%s' already exists with different settings", name_, indexName); @@ -1005,7 +1051,7 @@ void NamespaceImpl::addIndex(const IndexDef& indexDef) { if (IsComposite(indexDef.Type())) { verifyCompositeIndex(indexDef); addCompositeIndex(indexDef); - return; + return true; } const int idxNo = payloadType_->NumFields(); @@ -1025,20 +1071,20 @@ void NamespaceImpl::addIndex(const IndexDef& indexDef) { TagsPath tagsPath = tagsMatcher_.path2tag(jsonPaths[0], true); assertrx(tagsPath.size() > 0); fields.push_back(std::move(tagsPath)); - auto newIndex = 
Index::New(indexDef, payloadType_, fields); + auto newIndex = Index::New(indexDef, PayloadType{payloadType_}, std::move(fields), config_.cacheConfig); rollbacker.RollBacker_insertIndex(insertIndex(std::move(newIndex), idxNo, indexName)); ++sparseIndexesCount_; rollbacker.NeedDecreaseSparseIndexCount(); fillSparseIndex(*indexes_[idxNo], jsonPaths[0]); } else { PayloadType oldPlType = payloadType_; - auto newIndex = Index::New(indexDef, PayloadType(), FieldsSet()); + auto newIndex = Index::New(indexDef, PayloadType(), FieldsSet(), config_.cacheConfig); payloadType_.Add(PayloadFieldType{newIndex->KeyType(), indexName, jsonPaths, newIndex->Opts().IsArray()}); rollbacker.SetOldPayloadType(std::move(oldPlType)); tagsMatcher_.UpdatePayloadType(payloadType_); rollbacker.NeedResetPayloadTypeInTagsMatcher(); newIndex->SetFields(FieldsSet(idxNo)); - newIndex->UpdatePayloadType(payloadType_); + newIndex->UpdatePayloadType(PayloadType{payloadType_}); FieldsSet changedFields{0, idxNo}; rollbacker.RollBacker_insertIndex(insertIndex(std::move(newIndex), idxNo, indexName)); @@ -1046,6 +1092,7 @@ void NamespaceImpl::addIndex(const IndexDef& indexDef) { } updateSortedIdxCount(); rollbacker.Disable(); + return true; } void NamespaceImpl::fillSparseIndex(Index& index, std::string_view jsonPath) { @@ -1063,24 +1110,26 @@ void NamespaceImpl::fillSparseIndex(Index& index, std::string_view jsonPath) { markUpdated(false); } -void NamespaceImpl::updateIndex(const IndexDef& indexDef) { +bool NamespaceImpl::updateIndex(const IndexDef& indexDef) { const std::string& indexName = indexDef.name_; IndexDef foundIndex = getIndexDefinition(indexName); - if (indexDef.IsEqual(foundIndex, true)) { + if (indexDef.IsEqual(foundIndex, IndexComparison::SkipConfig)) { // Index has not been changed - if (!indexDef.IsEqual(foundIndex, false)) { + if (!indexDef.IsEqual(foundIndex, IndexComparison::WithConfig)) { // Only index config changed // Just call SetOpts indexes_[getIndexByName(indexName)]->SetOpts(indexDef.opts_); + return true; } - return; + return false; } verifyUpdateIndex(indexDef); dropIndex(indexDef); addIndex(indexDef); + return true; } IndexDef NamespaceImpl::getIndexDefinition(const std::string& indexName) const { @@ -1094,27 +1143,19 @@ IndexDef NamespaceImpl::getIndexDefinition(const std::string& indexName) const { void NamespaceImpl::verifyUpdateCompositeIndex(const IndexDef& indexDef) const { verifyCompositeIndex(indexDef); - IndexType type = indexDef.Type(); - - for (auto& jsonPathOrSubIdx : indexDef.jsonPaths_) { - auto idxNameIt = indexesNames_.find(jsonPathOrSubIdx); - if (idxNameIt != indexesNames_.end() && !indexes_[idxNameIt->second]->Opts().IsSparse() && - indexes_[idxNameIt->second]->Opts().IsArray() && (type == IndexCompositeBTree || type == IndexCompositeHash)) { - throw Error(errParams, "Cannot add array subindex '%s' to composite index '%s'", jsonPathOrSubIdx, indexDef.name_); - } - } - const auto newIndex = std::unique_ptr(Index::New(indexDef, payloadType_, {})); + const auto newIndex = std::unique_ptr(Index::New(indexDef, PayloadType{payloadType_}, {}, config_.cacheConfig)); } void NamespaceImpl::addCompositeIndex(const IndexDef& indexDef) { const auto& indexName = indexDef.name_; FieldsSet fields; - createFieldsSet(indexName, indexDef.Type(), indexDef.jsonPaths_, fields); + createCompositeFieldsSet(indexName, indexDef.jsonPaths_, fields); assertrx(indexesNames_.find(indexName) == indexesNames_.end()); const int idxPos = indexes_.size(); - auto insertIndex_rollbacker{insertIndex(Index::New(indexDef, 
payloadType_, fields), idxPos, indexName)}; + auto insertIndex_rollbacker{ + insertIndex(Index::New(indexDef, PayloadType{payloadType_}, FieldsSet{fields}, config_.cacheConfig), idxPos, indexName)}; auto indexesCacheCleaner{GetIndexesCacheCleaner()}; for (IdType rowId = 0; rowId < int(items_.size()); rowId++) { @@ -1134,7 +1175,7 @@ void NamespaceImpl::addCompositeIndex(const IndexDef& indexDef) { } template -void NamespaceImpl::createFieldsSet(const std::string& idxName, IndexType type, const PathsT& paths, FieldsSet& fields) { +void NamespaceImpl::createCompositeFieldsSet(const std::string& idxName, const PathsT& paths, FieldsSet& fields) { fields.clear(); const JsonPathsContainerT* jsonPaths = nullptr; @@ -1149,22 +1190,19 @@ void NamespaceImpl::createFieldsSet(const std::string& idxName, IndexType type, } for (const auto& jsonPathOrSubIdx : *jsonPaths) { - auto idxNameIt = indexesNames_.find(jsonPathOrSubIdx); - if (idxNameIt == indexesNames_.end() || idxName == jsonPathOrSubIdx) { + int idx; + if (!getScalarIndexByName(jsonPathOrSubIdx, idx) /* || idxName == jsonPathOrSubIdx*/) { // TODO may be uncomment TagsPath tagsPath = tagsMatcher_.path2tag(jsonPathOrSubIdx, true); if (tagsPath.empty()) { throw Error(errLogic, "Unable to get or create json-path '%s' for composite index '%s'", jsonPathOrSubIdx, idxName); } fields.push_back(tagsPath); fields.push_back(jsonPathOrSubIdx); - } else if (indexes_[idxNameIt->second]->Opts().IsSparse() && !indexes_[idxNameIt->second]->Opts().IsArray()) { - fields.push_back(jsonPathOrSubIdx); - fields.push_back(indexes_[idxNameIt->second]->Fields().getTagsPath(0)); } else { - if (indexes_[idxNameIt->second]->Opts().IsArray() && (type == IndexCompositeBTree || type == IndexCompositeHash)) { - throw Error(errParams, "Cannot add array subindex '%s' to composite index '%s'", jsonPathOrSubIdx, idxName); - } - fields.push_back(idxNameIt->second); + const auto& idxFields = indexes_[idx]->Fields(); + assertrx_throw(idxFields.size() == 1); + assertrx_throw(idxFields[0] >= 0); + fields.push_back(idxFields[0]); } } @@ -1192,6 +1230,16 @@ int NamespaceImpl::getIndexByNameOrJsonPath(std::string_view index) const { } } +int NamespaceImpl::getScalarIndexByName(std::string_view index) const { + int idx; + if (getIndexByName(index, idx)) { + if (idx < indexes_.firstCompositePos()) { + return idx; + } + } + throw Error(errParams, "Index '%s' not found in '%s'", index, name_); +} + bool NamespaceImpl::getIndexByName(std::string_view name, int& index) const { auto it = indexesNames_.find(name); if (it == indexesNames_.end()) return false; @@ -1211,6 +1259,17 @@ bool NamespaceImpl::getIndexByNameOrJsonPath(std::string_view name, int& index) return false; } +bool NamespaceImpl::getScalarIndexByName(std::string_view name, int& index) const { + int idx; + if (getIndexByName(name, idx)) { + if (idx < indexes_.firstCompositePos()) { + index = idx; + return true; + } + } + return false; +} + bool NamespaceImpl::getSparseIndexByJsonPath(std::string_view jsonPath, int& index) const { // FIXME: Try to merge getIndexByNameOrJsonPath and getSparseIndexByJsonPath if it's possible for (int i = indexes_.firstSparsePos(), end = indexes_.firstSparsePos() + indexes_.sparseIndexesSize(); i < end; ++i) { @@ -1234,9 +1293,11 @@ void NamespaceImpl::doUpdate(const Query& query, QueryResults& result, const NsC selCtx.contextCollectingMode = true; selCtx.requiresCrashTracking = true; selCtx.inTransaction = ctx.inTransaction; + selCtx.crashReporterQueryType = QueryUpdate; selecter(result, selCtx, 
ctx.rdxContext); - auto tmStart = high_resolution_clock::now(); + ActiveQueryScope queryScope(query, QueryUpdate, optimizationState_, strHolder_.get()); + const auto tmStart = high_resolution_clock::now(); bool updateWithJson = false; bool withExpressions = false; @@ -1292,7 +1353,7 @@ void NamespaceImpl::doUpdate(const Query& query, QueryResults& result, const NsC if (!ctx.rdxContext.fromReplication_) setReplLSNs(LSNPair(lsn_t(), lsn)); } - if rx_unlikely (query.debugLevel >= LogInfo) { + if (query.debugLevel >= LogInfo) { logPrintf(LogInfo, "Updated %d items in %d µs", result.Count(), duration_cast(high_resolution_clock::now() - tmStart).count()); } @@ -1345,6 +1406,9 @@ void NamespaceImpl::ModifyItem(Item& item, ItemModifyMode mode, const RdxContext calc.LockHit(); checkApplySlaveUpdate(ctx.fromReplication_); + if (mode == ModeDelete && rx_unlikely(item.PkFields() != pkFields())) { + throw Error(errNotValid, "Item has outdated PK metadata (probably PK has been changed during the Delete-call)"); + } modifyItem(item, mode, ctx); tryForceFlush(std::move(wlck)); @@ -1419,12 +1483,15 @@ void NamespaceImpl::doDelete(const Query& q, QueryResults& result, const NsConte selCtx.contextCollectingMode = true; selCtx.requiresCrashTracking = true; selCtx.inTransaction = ctx.inTransaction; + selCtx.crashReporterQueryType = QueryDelete; SelectFunctionsHolder func; selCtx.functions = &func; selecter(result, selCtx, ctx.rdxContext); + + ActiveQueryScope queryScope(q, QueryDelete, optimizationState_, strHolder_.get()); assertrx(result.IsNamespaceAdded(this)); + const auto tmStart = high_resolution_clock::now(); - auto tmStart = high_resolution_clock::now(); AsyncStorage::AdviceGuardT storageAdvice; if (result.Items().size() >= AsyncStorage::kLimitToAdviceBatching) { storageAdvice = storage_.AdviceBatching(); @@ -1442,12 +1509,13 @@ void NamespaceImpl::doDelete(const Query& q, QueryResults& result, const NsConte WrSerializer cjson; for (auto it : result) { cjson.Reset(); - it.GetCJSON(cjson, false); + const auto err = it.GetCJSON(cjson, false); + if (!err.ok()) throw err; const WALRecord wrec{WalItemModify, cjson.Slice(), tagsMatcher_.version(), ModeDelete, ctx.inTransaction}; processWalRecord(wrec, ctx.rdxContext); } } - if rx_unlikely (q.debugLevel >= LogInfo) { + if (q.debugLevel >= LogInfo) { logPrintf(LogInfo, "Deleted %d items in %d µs", result.Count(), duration_cast(high_resolution_clock::now() - tmStart).count()); } @@ -1489,7 +1557,8 @@ void NamespaceImpl::doTruncate(const NsContext& ctx) { itemsDataSize_ = 0; for (size_t i = 0; i < indexes_.size(); ++i) { const IndexOpts opts = indexes_[i]->Opts(); - std::unique_ptr newIdx{Index::New(getIndexDefinition(i), indexes_[i]->GetPayloadType(), indexes_[i]->Fields())}; + std::unique_ptr newIdx{Index::New(getIndexDefinition(i), PayloadType{indexes_[i]->GetPayloadType()}, + FieldsSet{indexes_[i]->Fields()}, config_.cacheConfig)}; newIdx->SetOpts(opts); std::swap(indexes_[i], newIdx); removeIndex(newIdx); @@ -1533,7 +1602,9 @@ void NamespaceImpl::setReplLSNs(LSNPair LSNs) { repl_.originLSN = LSNs.originLSN_; repl_.lastUpstreamLSN = LSNs.upstreamLSN_; replStateUpdates_.fetch_add(1, std::memory_order_release); - logPrintf(LogTrace, "[repl:%s]:%d setReplLSNs originLSN = %s upstreamLSN=%s", name_, serverId_, LSNs.originLSN_, LSNs.upstreamLSN_); + if (!isSystem()) { + logPrintf(LogTrace, "[repl:%s]:%d setReplLSNs originLSN = %s upstreamLSN=%s", name_, serverId_, LSNs.originLSN_, LSNs.upstreamLSN_); + } } void NamespaceImpl::setSlaveMode(const RdxContext& ctx) { 
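The doUpdate()/doDelete() hunks above tag the select context with the real query type and install an ActiveQueryScope, so the crash reporter can print the executing UPDATE or DELETE statement instead of treating it as a generic select. Below is a minimal sketch of the thread-local scope-guard idea behind this, with assumed, simplified names (QueryScope, CurrentQueryInfo, PrintCurrentQuery are illustrative only; the real ActiveQueryScope also tracks namespace optimization state, explain data and string holders, as the crashqueryreporter.cc hunks further down show):

// Illustrative sketch only: a thread-local RAII guard that records which statement
// is currently executing so a crash handler can dump it.
#include <iostream>
#include <optional>
#include <string>
#include <utility>

struct CurrentQueryInfo {
	std::string sql;
	std::string type;  // "SELECT", "UPDATE", "DELETE", ...
};

// One slot per thread: no locking is needed, and a crash handler sees exactly
// the query of the thread it runs on.
thread_local std::optional<CurrentQueryInfo> g_currentQuery;

class QueryScope {
public:
	QueryScope(std::string sql, std::string type) { g_currentQuery = CurrentQueryInfo{std::move(sql), std::move(type)}; }
	~QueryScope() { g_currentQuery.reset(); }
	QueryScope(const QueryScope &) = delete;
	QueryScope &operator=(const QueryScope &) = delete;
};

// Would be called from the crash reporting path.
void PrintCurrentQuery(std::ostream &out) {
	if (!g_currentQuery) {
		out << "*** No active query on this thread ***\n";
		return;
	}
	out << " Query (" << g_currentQuery->type << "): " << g_currentQuery->sql << "\n";
}

int main() {
	QueryScope scope("UPDATE ns SET v = 1 WHERE id = 7", "UPDATE");
	PrintCurrentQuery(std::cout);  // prints the tracked statement with its real type
}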
@@ -1587,6 +1658,7 @@ void NamespaceImpl::CommitTransaction(Transaction& tx, QueryResults& result, NsC wlck = queryStatCalculator.CreateLock(*this, &NamespaceImpl::wLock, ctx.rdxContext); cg.Reset(); calc.LockHit(); + tx.ValidatePK(pkFields()); } checkApplySlaveUpdate(ctx.rdxContext.fromReplication_); @@ -1908,12 +1980,10 @@ void NamespaceImpl::optimizeIndexes(const NsContext& ctx) { rlck = rLock(ctx.rdxContext); } - if (isSystem()) return; - if (!lastUpdateTime || !config_.optimizationTimeout || now - lastUpdateTime < config_.optimizationTimeout) { + if (isSystem() || repl_.temporary || !indexes_.size()) { return; } - - if (!indexes_.size()) { + if (!lastUpdateTime || !config_.optimizationTimeout || now - lastUpdateTime < config_.optimizationTimeout) { return; } @@ -1988,7 +2058,7 @@ void NamespaceImpl::markUpdated(bool forceOptimizeAllIndexes) { int expected{OptimizationCompleted}; optimizationState_.compare_exchange_strong(expected, OptimizedPartially); } - queryTotalCountCache_->Clear(); + queryCountCache_->Clear(); joinCache_->Clear(); lastUpdateTime_.store( std::chrono::duration_cast(std::chrono::system_clock::now().time_since_epoch()).count(), @@ -2066,7 +2136,7 @@ NamespaceMemStat NamespaceImpl::GetMemStat(const RdxContext& ctx) { auto rlck = rLock(ctx); ret.name = name_; ret.joinCache = joinCache_->GetMemStat(); - ret.queryCache = queryTotalCountCache_->GetMemStat(); + ret.queryCache = queryCountCache_->GetMemStat(); ret.itemsCount = ItemsCount(); *(static_cast(&ret.replication)) = getReplState(); @@ -2580,6 +2650,10 @@ Item NamespaceImpl::newItem() { auto impl_ = pool_.get(0, payloadType_, tagsMatcher_, pkFields(), schema_); impl_->tagsMatcher() = tagsMatcher_; impl_->tagsMatcher().clearUpdated(); + impl_->schema() = schema_; +#ifdef RX_WITH_STDLIB_DEBUG + assertrx_dbg(impl_->PkFields() == pkFields()); +#endif // RX_WITH_STDLIB_DEBUG return Item(impl_.release()); } diff --git a/cpp_src/core/namespace/namespaceimpl.h b/cpp_src/core/namespace/namespaceimpl.h index 46fa4d1e1..e7be3ab9e 100644 --- a/cpp_src/core/namespace/namespaceimpl.h +++ b/cpp_src/core/namespace/namespaceimpl.h @@ -20,11 +20,10 @@ #include "core/schema.h" #include "core/storage/idatastorage.h" #include "core/storage/storagetype.h" -#include "core/transactionimpl.h" +#include "core/transaction.h" #include "estl/contexted_locks.h" #include "estl/fast_hash_map.h" #include "estl/shared_mutex.h" -#include "estl/smart_lock.h" #include "estl/syncpool.h" #include "replicator/updatesobserver.h" #include "replicator/waltracker.h" @@ -258,8 +257,10 @@ class NamespaceImpl : public intrusive_atomic_rc_base { // NOLINT(*performance. int getIndexByName(std::string_view index) const; int getIndexByNameOrJsonPath(std::string_view name) const; + int getScalarIndexByName(std::string_view name) const; bool getIndexByName(std::string_view name, int &index) const; bool getIndexByNameOrJsonPath(std::string_view name, int &index) const; + bool getScalarIndexByName(std::string_view name, int &index) const; bool getSparseIndexByJsonPath(std::string_view jsonPath, int &index) const; void FillResult(QueryResults &result, const IdSet &ids) const; @@ -281,6 +282,12 @@ class NamespaceImpl : public intrusive_atomic_rc_base { // NOLINT(*performance. int getNsNumber() const { return schema_ ? 
schema_->GetProtobufNsNumber() : 0; } IndexesCacheCleaner GetIndexesCacheCleaner() { return IndexesCacheCleaner{*this}; } void SetDestroyFlag() { dbDestroyed_ = true; } + Error FlushStorage(const RdxContext &ctx) { + const auto flushOpts = StorageFlushOpts().WithImmediateReopen(); + auto lck = rLock(ctx); + storage_.Flush(flushOpts); + return storage_.GetStatusCached().err; + } private: struct SysRecordsVersions { @@ -342,16 +349,16 @@ class NamespaceImpl : public intrusive_atomic_rc_base { // NOLINT(*performance. void doDelete(IdType id); void optimizeIndexes(const NsContext &); [[nodiscard]] RollBack_insertIndex insertIndex(std::unique_ptr newIndex, int idxNo, const std::string &realName); - void addIndex(const IndexDef &indexDef); + bool addIndex(const IndexDef &indexDef); void addCompositeIndex(const IndexDef &indexDef); template - void createFieldsSet(const std::string &idxName, IndexType type, const PathsT &paths, FieldsSet &fields); + void createCompositeFieldsSet(const std::string &idxName, const PathsT &paths, FieldsSet &fields); void verifyCompositeIndex(const IndexDef &indexDef) const; template void verifyAddIndex(const IndexDef &indexDef, GetNameF &&) const; void verifyUpdateIndex(const IndexDef &indexDef) const; void verifyUpdateCompositeIndex(const IndexDef &indexDef) const; - void updateIndex(const IndexDef &indexDef); + bool updateIndex(const IndexDef &indexDef); void dropIndex(const IndexDef &index); void addToWAL(const IndexDef &indexDef, WALRecType type, const RdxContext &ctx); void addToWAL(std::string_view json, WALRecType type, const RdxContext &ctx); @@ -413,8 +420,6 @@ class NamespaceImpl : public intrusive_atomic_rc_base { // NOLINT(*performance. std::unordered_map meta_; - shared_ptr queryTotalCountCache_; - int sparseIndexesCount_ = 0; VariantArray krefs, skrefs; @@ -431,7 +436,7 @@ class NamespaceImpl : public intrusive_atomic_rc_base { // NOLINT(*performance. NamespaceImpl(const NamespaceImpl &src, AsyncStorage::FullLockT &storageLock); - bool isSystem() const { return !name_.empty() && name_[0] == '#'; } + bool isSystem() const { return isSystemNamespaceNameFast(name_); } IdType createItem(size_t realSize); void checkApplySlaveUpdate(bool v); @@ -450,12 +455,12 @@ class NamespaceImpl : public intrusive_atomic_rc_base { // NOLINT(*performance. 
} } - JoinCache::Ptr joinCache_; - PerfStatCounterMT updatePerfCounter_, selectPerfCounter_; std::atomic enablePerfCounters_; NamespaceConfigData config_; + std::unique_ptr queryCountCache_; + std::unique_ptr joinCache_; // Replication variables WALTracker wal_; ReplicationState repl_; diff --git a/cpp_src/core/namespace/namespacestat.h b/cpp_src/core/namespace/namespacestat.h index dd02f8f9b..9fe6ca7bb 100644 --- a/cpp_src/core/namespace/namespacestat.h +++ b/cpp_src/core/namespace/namespacestat.h @@ -22,14 +22,6 @@ struct LRUCacheMemStat { size_t itemsCount = 0; size_t emptyCount = 0; size_t hitCountLimit = 0; - - LRUCacheMemStat &operator+=(const LRUCacheMemStat &other) noexcept { - totalSize += other.totalSize; - itemsCount += other.itemsCount; - emptyCount += other.emptyCount; - hitCountLimit += other.hitCountLimit; - return *this; - } }; struct IndexMemStat { diff --git a/cpp_src/core/nsselecter/aggregator.cc b/cpp_src/core/nsselecter/aggregator.cc index 81194c7d3..837bebab0 100644 --- a/cpp_src/core/nsselecter/aggregator.cc +++ b/cpp_src/core/nsselecter/aggregator.cc @@ -26,8 +26,13 @@ static void copy(It begin, It end, std::vector &facets, const Field ConstPayload pl(payloadType, begin->first); VariantArray va; if (fields[i] == IndexValueType::SetByJsonPath) { - const TagsPath &tagsPath = fields.getTagsPath(tagPathIdx++); - pl.GetByJsonPath(tagsPath, va, KeyValueType::Undefined{}); + if (fields.isTagsPathIndexed(tagPathIdx)) { + const IndexedTagsPath &tagsPath = fields.getIndexedTagsPath(tagPathIdx++); + pl.GetByJsonPath(tagsPath, va, KeyValueType::Undefined{}); + } else { + const TagsPath &tagsPath = fields.getTagsPath(tagPathIdx++); + pl.GetByJsonPath(tagsPath, va, KeyValueType::Undefined{}); + } if (va.IsObjectValue()) { throw Error(errQueryExec, "Cannot aggregate object field"); } @@ -221,7 +226,7 @@ Aggregator::Aggregator(const PayloadType &payloadType, const FieldsSet &fields, } } else { if (sort.empty()) { - facets_ = std::make_unique(MultifieldUnorderedMap{payloadType_, fields_}); + facets_ = std::make_unique(MultifieldUnorderedMap{PayloadType{payloadType_}, FieldsSet{fields_}}); } else { facets_ = std::make_unique(MultifieldOrderedMap{MultifieldComparator{sort, fields_, payloadType_}}); } diff --git a/cpp_src/core/nsselecter/crashqueryreporter.cc b/cpp_src/core/nsselecter/crashqueryreporter.cc index dcfe7036b..7c5fa2ba0 100644 --- a/cpp_src/core/nsselecter/crashqueryreporter.cc +++ b/cpp_src/core/nsselecter/crashqueryreporter.cc @@ -9,36 +9,55 @@ namespace reindexer { struct QueryDebugContext { - SelectCtx *selectCtx = nullptr; + const Query *mainQuery = nullptr; + const Query *parentQuery = nullptr; std::atomic *nsOptimizationState = nullptr; ExplainCalc *explainCalc = nullptr; std::atomic_bool *nsLockerState = nullptr; StringsHolder *nsStrHolder = nullptr; + QueryType realQueryType = QuerySelect; }; thread_local QueryDebugContext g_queryDebugCtx; ActiveQueryScope::ActiveQueryScope(SelectCtx &ctx, std::atomic &nsOptimizationState, ExplainCalc &explainCalc, - std::atomic_bool &nsLockerState, StringsHolder *strHolder) + std::atomic_bool &nsLockerState, StringsHolder *strHolder) noexcept : isTrackedQuery_(ctx.requiresCrashTracking) { if (isTrackedQuery_) { - g_queryDebugCtx.selectCtx = &ctx; + g_queryDebugCtx.mainQuery = &ctx.query; + g_queryDebugCtx.parentQuery = ctx.parentQuery; g_queryDebugCtx.nsOptimizationState = &nsOptimizationState; g_queryDebugCtx.explainCalc = &explainCalc; g_queryDebugCtx.nsLockerState = &nsLockerState; g_queryDebugCtx.nsStrHolder = strHolder; + 
g_queryDebugCtx.realQueryType = ctx.crashReporterQueryType; } } + +ActiveQueryScope::ActiveQueryScope(const Query &q, QueryType realQueryType, std::atomic &nsOptimizationState, + StringsHolder *strHolder) noexcept + : isTrackedQuery_(true) { + g_queryDebugCtx.mainQuery = &q; + g_queryDebugCtx.parentQuery = nullptr; + g_queryDebugCtx.nsOptimizationState = &nsOptimizationState; + g_queryDebugCtx.explainCalc = nullptr; + g_queryDebugCtx.nsLockerState = nullptr; + g_queryDebugCtx.nsStrHolder = strHolder; + g_queryDebugCtx.realQueryType = realQueryType; +} + ActiveQueryScope::~ActiveQueryScope() { if (isTrackedQuery_) { - if (!g_queryDebugCtx.selectCtx) { - logPrintf(LogWarning, "~ActiveQueryScope: Empty context for tracked query"); + if (!g_queryDebugCtx.mainQuery) { + logPrintf(LogWarning, "~ActiveQueryScope: Empty query pointer in the ActiveQueryScope"); } - g_queryDebugCtx.selectCtx = nullptr; + g_queryDebugCtx.mainQuery = nullptr; + g_queryDebugCtx.parentQuery = nullptr; g_queryDebugCtx.nsOptimizationState = nullptr; g_queryDebugCtx.explainCalc = nullptr; g_queryDebugCtx.nsLockerState = nullptr; g_queryDebugCtx.nsStrHolder = nullptr; + g_queryDebugCtx.realQueryType = QuerySelect; } } @@ -57,41 +76,51 @@ static std::string_view nsOptimizationStateName(int state) { } void PrintCrashedQuery(std::ostream &out) { - if (!g_queryDebugCtx.selectCtx) { + if (!g_queryDebugCtx.mainQuery && !g_queryDebugCtx.parentQuery) { out << "*** No additional info from crash query tracker ***" << std::endl; return; } out << "*** Current query dump ***" << std::endl; - out << " Query: " << g_queryDebugCtx.selectCtx->query.GetSQL() << std::endl; - if (g_queryDebugCtx.selectCtx->parentQuery) { - out << " Parent Query: " << g_queryDebugCtx.selectCtx->parentQuery->GetSQL() << std::endl; + if (g_queryDebugCtx.mainQuery) { + out << " Query: " << g_queryDebugCtx.mainQuery->GetSQL(g_queryDebugCtx.realQueryType) << std::endl; + } + if (g_queryDebugCtx.parentQuery) { + out << " Parent Query: " << g_queryDebugCtx.parentQuery->GetSQL() << std::endl; } - out << " NS state: " << nsOptimizationStateName(g_queryDebugCtx.nsOptimizationState->load()) << std::endl; - out << " NS.locker state: "; - if (g_queryDebugCtx.nsLockerState->load()) { - out << " readonly"; - } else { - out << " regular"; + if (g_queryDebugCtx.nsOptimizationState) { + out << " NS state: " << nsOptimizationStateName(g_queryDebugCtx.nsOptimizationState->load()) << std::endl; } - out << std::endl; - out << " NS.strHolder state: [" << std::endl; - out << " memstat = " << g_queryDebugCtx.nsStrHolder->MemStat() << std::endl; - out << " holds indexes = " << std::boolalpha << g_queryDebugCtx.nsStrHolder->HoldsIndexes() << std::endl; - if (g_queryDebugCtx.nsStrHolder->HoldsIndexes()) { - const auto &indexes = g_queryDebugCtx.nsStrHolder->Indexes(); - out << " indexes.size = " << indexes.size() << std::endl; - out << " indexes = ["; - for (size_t i = 0; i < indexes.size(); ++i) { - if (i) out << " "; - out << indexes[i]->Name(); + if (g_queryDebugCtx.nsLockerState) { + out << " NS.locker state: "; + if (g_queryDebugCtx.nsLockerState->load()) { + out << " readonly"; + } else { + out << " regular"; + } + out << std::endl; + } + if (g_queryDebugCtx.nsStrHolder) { + out << " NS.strHolder state: [" << std::endl; + out << " memstat = " << g_queryDebugCtx.nsStrHolder->MemStat() << std::endl; + out << " holds indexes = " << std::boolalpha << g_queryDebugCtx.nsStrHolder->HoldsIndexes() << std::endl; + if (g_queryDebugCtx.nsStrHolder->HoldsIndexes()) { + const auto &indexes = 
g_queryDebugCtx.nsStrHolder->Indexes(); + out << " indexes.size = " << indexes.size() << std::endl; + out << " indexes = ["; + for (size_t i = 0; i < indexes.size(); ++i) { + if (i) out << " "; + out << indexes[i]->Name(); + } + out << "]" << std::endl; } out << "]" << std::endl; } - out << "]" << std::endl; - out << " Explain: " << g_queryDebugCtx.explainCalc->GetJSON() << std::endl; + if (g_queryDebugCtx.explainCalc) { + out << " Explain: " << g_queryDebugCtx.explainCalc->GetJSON() << std::endl; + } - g_queryDebugCtx.selectCtx = nullptr; + g_queryDebugCtx.mainQuery = g_queryDebugCtx.parentQuery = nullptr; } } // namespace reindexer diff --git a/cpp_src/core/nsselecter/crashqueryreporter.h b/cpp_src/core/nsselecter/crashqueryreporter.h index 5ed0aff96..b76ab67a2 100644 --- a/cpp_src/core/nsselecter/crashqueryreporter.h +++ b/cpp_src/core/nsselecter/crashqueryreporter.h @@ -2,17 +2,20 @@ #include #include +#include "core/type_consts.h" namespace reindexer { struct SelectCtx; class ExplainCalc; class StringsHolder; +class Query; class ActiveQueryScope { public: ActiveQueryScope(SelectCtx &ctx, std::atomic &nsOptimizationState, ExplainCalc &explainCalc, std::atomic_bool &nsLockerState, - StringsHolder *strHolder); + StringsHolder *strHolder) noexcept; + ActiveQueryScope(const Query &q, QueryType realQueryType, std::atomic &nsOptimizationState, StringsHolder *strHolder) noexcept; ~ActiveQueryScope(); public: diff --git a/cpp_src/core/nsselecter/fieldscomparator.h b/cpp_src/core/nsselecter/fieldscomparator.h index dfe375bd0..cda7dd1ff 100644 --- a/cpp_src/core/nsselecter/fieldscomparator.h +++ b/cpp_src/core/nsselecter/fieldscomparator.h @@ -18,13 +18,13 @@ class FieldsComparator { const std::string& Name() const&& = delete; std::string Dump() const { return Name(); } int GetMatchedCount() const noexcept { return matchedCount_; } - void SetLeftField(const TagsPath& tpath) { - setField(tpath, ctx_[0].lCtx_); + void SetLeftField(const FieldsSet& fields) { + setField(fields, ctx_[0].lCtx_); leftFieldSet = true; } - void SetRightField(const TagsPath& tpath) { + void SetRightField(const FieldsSet& fields) { assertrx(leftFieldSet); - setField(tpath, ctx_[0].rCtx_); + setField(fields, ctx_[0].rCtx_); } void SetLeftField(const FieldsSet& fset, KeyValueType type, bool isArray) { if (type.Is()) { @@ -67,6 +67,11 @@ class FieldsComparator { }; void setField(const TagsPath& tpath, FieldContext& fctx) { fctx.fields_.push_back(tpath); } + void setField(const FieldsSet& fields, FieldContext& fctx) { + assertrx_throw(fields.size() == 1); + assertrx_throw(fields[0] == IndexValueType::SetByJsonPath); + setField(fields.getTagsPath(0), fctx); + } void setField(FieldContext& fctx, FieldsSet fset, KeyValueType type, bool isArray) { fctx.fields_ = std::move(fset); fctx.type_ = type; diff --git a/cpp_src/core/nsselecter/joinedselector.cc b/cpp_src/core/nsselecter/joinedselector.cc index 84fbe4b8b..7492f23a5 100644 --- a/cpp_src/core/nsselecter/joinedselector.cc +++ b/cpp_src/core/nsselecter/joinedselector.cc @@ -3,6 +3,7 @@ #include "core/namespace/namespaceimpl.h" #include "core/queryresults/joinresults.h" #include "nsselecter.h" +#include "vendor/sparse-map/sparse_set.h" constexpr size_t kMaxIterationsScaleForInnerJoinOptimization = 100; @@ -53,8 +54,8 @@ void JoinedSelector::selectFromPreResultValues(QueryResults &joinItemR, const Qu for (const ItemRef &item : preResult_->values) { auto &v = item.Value(); assertrx(!v.IsFree()); - if (query.entries.CheckIfSatisfyConditions({preResult_->values.payloadType, v}, 
preResult_->values.tagsMatcher)) { - if (++matched > query.count) break; + if (query.entries.CheckIfSatisfyConditions({preResult_->values.payloadType, v})) { + if (++matched > query.Limit()) break; found = true; joinItemR.Add(item); } @@ -76,21 +77,12 @@ bool JoinedSelector::Process(IdType rowId, int nsId, ConstPayload payload, bool std::unique_ptr itemQueryCopy; Query *itemQueryPtr = &itemQuery_; for (auto &je : joinQuery_.joinEntries_) { - const bool nonIndexedField = (je.idxNo == IndexValueType::SetByJsonPath); - if (nonIndexedField) { - VariantArray &values = itemQueryPtr->entries.Get(i).values; - const KeyValueType type{values.empty() ? KeyValueType::Undefined{} : values[0].Type()}; - payload.GetByJsonPath(je.index_, leftNs_->tagsMatcher_, values, type); - } else { - const auto &index = *leftNs_->indexes_[je.idxNo]; - const auto &fields = index.Fields(); - if (fields.getJsonPathsLength() == 0) { - payload.Get(fields[0], itemQueryPtr->entries.Get(i).values); - } else { - payload.GetByJsonPath(fields.getTagsPath(0), itemQueryPtr->entries.Get(i).values, index.KeyType()); - } + QueryEntry &qentry = itemQueryPtr->entries.Get(i); + { + auto keyValues = qentry.UpdatableValues(QueryEntry::IgnoreEmptyValues{}); + payload.GetByFieldsSet(je.LeftFields(), keyValues, je.LeftFieldType(), je.LeftCompositeFieldsTypes()); } - if (itemQueryPtr->entries.Get(i).values.empty()) { + if (qentry.Values().empty()) { if (itemQueryPtr == &itemQuery_) { itemQueryCopy = std::unique_ptr{new Query(itemQuery_)}; itemQueryPtr = itemQueryCopy.get(); @@ -99,7 +91,7 @@ bool JoinedSelector::Process(IdType rowId, int nsId, ConstPayload payload, bool } ++i; } - itemQueryPtr->Limit(match ? joinQuery_.count : 0); + itemQueryPtr->Limit(match ? joinQuery_.Limit() : 0); bool found = false; bool matchedAtLeastOnce = false; @@ -122,58 +114,70 @@ bool JoinedSelector::Process(IdType rowId, int nsId, ConstPayload payload, bool return matchedAtLeastOnce; } -template -void JoinedSelector::readValuesFromRightNs(VariantArray &values, const KeyValueType leftIndexType, [[maybe_unused]] int rightIdxNo, - [[maybe_unused]] std::string_view rightIndex) const { - std::unordered_set set; - VariantArray buffer; - for (IdType rowId : preResult_->ids) { - if (rightNs_->items_[rowId].IsFree()) continue; - buffer.clear(); - const ConstPayload pl{rightNs_->payloadType_, rightNs_->items_[rowId]}; - if constexpr (byJsonPath) { - pl.GetByJsonPath(rightIndex, rightNs_->tagsMatcher_, buffer, leftIndexType); - } else { - pl.Get(rightIdxNo, buffer); +template +VariantArray JoinedSelector::readValuesOfRightNsFrom(const Cont &data, const Fn &createPayload, const QueryJoinEntry &entry, + const PayloadType &pt) const { + const auto rightFieldType = entry.RightFieldType(); + const auto leftFieldType = entry.LeftFieldType(); + VariantArray res; + if (rightFieldType.Is()) { + unordered_payload_set set(data.size(), hash_composite(pt, entry.RightFields()), equal_composite(pt, entry.RightFields())); + for (const auto &v : data) { + const auto pl = createPayload(v); + if (pl) { + set.insert(*pl->Value()); + } } - if (!leftIndexType.Is() && !leftIndexType.Is()) { - for (Variant &v : buffer) set.insert(std::move(v.convert(leftIndexType))); - } else { - for (Variant &v : buffer) set.insert(std::move(v)); + res.reserve(set.size()); + for (auto &s : set) { + res.emplace_back(std::move(s)); } - } - values.reserve(set.size()); - std::move(set.begin(), set.end(), std::back_inserter(values)); -} - -template -void JoinedSelector::readValuesFromPreResult(VariantArray &values, 
const KeyValueType leftIndexType, int rightIdxNo, - std::string_view rightIndex) const { - std::unordered_set set; - VariantArray buffer; - for (const ItemRef &item : preResult_->values) { - buffer.clear(); - assertrx(!item.Value().IsFree()); - const ConstPayload pl{preResult_->values.payloadType, item.Value()}; - if constexpr (byJsonPath) { - pl.GetByJsonPath(rightIndex, preResult_->values.tagsMatcher, buffer, leftIndexType); - (void)rightIdxNo; - } else { - pl.Get(rightIdxNo, buffer); - (void)rightIndex; + } else { + tsl::sparse_set set(data.size()); + for (const auto &v : data) { + const auto pl = createPayload(v); + if (!pl) { + continue; + } + pl->GetByFieldsSet(entry.RightFields(), res, entry.RightFieldType(), entry.RightCompositeFieldsTypes()); + if (!leftFieldType.Is() && !leftFieldType.Is()) { + for (Variant &v : res) set.insert(std::move(v.convert(leftFieldType))); + } else { + for (Variant &v : res) set.insert(std::move(v)); + } } - if (!leftIndexType.Is() && !leftIndexType.Is()) { - for (Variant &v : buffer) set.insert(std::move(v.convert(leftIndexType))); - } else { - for (Variant &v : buffer) set.insert(std::move(v)); + res.clear(); + for (auto &s : set) { + res.emplace_back(std::move(s)); } } - values.reserve(set.size()); - std::move(set.begin(), set.end(), std::back_inserter(values)); + return res; +} + +VariantArray JoinedSelector::readValuesFromRightNs(const QueryJoinEntry &entry) const { + return readValuesOfRightNsFrom( + preResult_->ids, + [this](IdType rowId) -> std::optional { + const auto &item = rightNs_->items_[rowId]; + if (item.IsFree()) { + return std::nullopt; + } + return ConstPayload{rightNs_->payloadType_, item}; + }, + entry, rightNs_->payloadType_); } -template void JoinedSelector::readValuesFromPreResult(VariantArray &, KeyValueType, int, std::string_view) const; -template void JoinedSelector::readValuesFromPreResult(VariantArray &, KeyValueType, int, std::string_view) const; +VariantArray JoinedSelector::readValuesFromPreResult(const QueryJoinEntry &entry) const { + return readValuesOfRightNsFrom( + preResult_->values, + [this](const ItemRef &item) -> std::optional { + if (item.Value().IsFree()) { + return std::nullopt; + } + return ConstPayload{preResult_->values.payloadType, item.Value()}; + }, + entry, preResult_->values.payloadType); +} void JoinedSelector::AppendSelectIteratorOfJoinIndexData(SelectIteratorContainer &iterators, int *maxIterations, unsigned sortId, const SelectFunction::Ptr &selectFnc, const RdxContext &rdxCtx) { @@ -184,39 +188,25 @@ void JoinedSelector::AppendSelectIteratorOfJoinIndexData(SelectIteratorContainer return; } unsigned optimized = 0; - assertrx(preResult_->dataMode != JoinPreResult::ModeValues || itemQuery_.entries.Size() == joinQuery_.joinEntries_.size()); + assertrx_throw(preResult_->dataMode != JoinPreResult::ModeValues || itemQuery_.entries.Size() == joinQuery_.joinEntries_.size()); for (size_t i = 0; i < joinQuery_.joinEntries_.size(); ++i) { const QueryJoinEntry &joinEntry = joinQuery_.joinEntries_[i]; - if (joinEntry.op_ != OpAnd || (joinEntry.condition_ != CondEq && joinEntry.condition_ != CondSet) || - (i + 1 < joinQuery_.joinEntries_.size() && joinQuery_.joinEntries_[i + 1].op_ == OpOr) || - joinEntry.idxNo == IndexValueType::SetByJsonPath) { + if (!joinEntry.IsLeftFieldIndexed() || joinEntry.Operation() != OpAnd || + (joinEntry.Condition() != CondEq && joinEntry.Condition() != CondSet) || + (i + 1 < joinQuery_.joinEntries_.size() && joinQuery_.joinEntries_[i + 1].Operation() == OpOr)) { continue; } - const 
auto &leftIndex = leftNs_->indexes_[joinEntry.idxNo]; + const auto &leftIndex = leftNs_->indexes_[joinEntry.LeftIdxNo()]; assertrx(!IsFullText(leftIndex->Type())); if (leftIndex->Opts().IsSparse()) continue; VariantArray values; if (preResult_->dataMode == JoinPreResult::ModeIdSet) { - int rightIdxNo = IndexValueType::NotSet; - if (rightNs_->getIndexByNameOrJsonPath(joinEntry.joinIndex_, rightIdxNo) && - !rightNs_->indexes_[rightIdxNo]->Opts().IsSparse()) { - readValuesFromRightNs(values, leftIndex->SelectKeyType(), rightIdxNo, joinEntry.joinIndex_); - } else { - readValuesFromRightNs(values, leftIndex->SelectKeyType(), rightIdxNo, joinEntry.joinIndex_); - } + values = readValuesFromRightNs(joinEntry); } else { - assertrx(itemQuery_.entries.HoldsOrReferTo(i)); - const QueryEntry &qe = itemQuery_.entries.Get(i); - assertrx(qe.index == joinEntry.joinIndex_); - const int rightIdxNo = qe.idxNo; - if (rightIdxNo == IndexValueType::SetByJsonPath) { - readValuesFromPreResult(values, leftIndex->SelectKeyType(), rightIdxNo, joinEntry.joinIndex_); - } else { - readValuesFromPreResult(values, leftIndex->SelectKeyType(), rightIdxNo, joinEntry.joinIndex_); - } + values = readValuesFromPreResult(joinEntry); } - auto ctx = selectFnc ? selectFnc->CreateCtx(joinEntry.idxNo) : BaseFunctionCtx::Ptr{}; + auto ctx = selectFnc ? selectFnc->CreateCtx(joinEntry.LeftIdxNo()) : BaseFunctionCtx::Ptr{}; assertrx(!ctx || ctx->type != BaseFunctionCtx::kFtCtx); if (leftIndex->Opts().GetCollateMode() == CollateUTF8) { @@ -230,9 +220,9 @@ void JoinedSelector::AppendSelectIteratorOfJoinIndexData(SelectIteratorContainer bool was = false; for (SelectKeyResult &res : leftIndex->SelectKey(values, CondSet, sortId, opts, ctx, rdxCtx)) { if (!res.comparators_.empty()) continue; - SelectIterator selIter{res, false, joinEntry.index_, - (joinEntry.idxNo < 0 ? IteratorFieldKind::NonIndexed : IteratorFieldKind::Indexed), false}; - selIter.Bind(leftNs_->payloadType_, joinEntry.idxNo); + SelectIterator selIter{res, false, joinEntry.LeftFieldName(), + (joinEntry.LeftIdxNo() < 0 ? 
IteratorFieldKind::NonIndexed : IteratorFieldKind::Indexed), false}; + selIter.Bind(leftNs_->payloadType_, joinEntry.LeftIdxNo()); const int curIterations = selIter.GetMaxIterations(); if (curIterations && curIterations < *maxIterations) *maxIterations = curIterations; iterators.Append(OpAnd, std::move(selIter)); diff --git a/cpp_src/core/nsselecter/joinedselector.h b/cpp_src/core/nsselecter/joinedselector.h index 0fb63758d..531a1c25e 100644 --- a/cpp_src/core/nsselecter/joinedselector.h +++ b/cpp_src/core/nsselecter/joinedselector.h @@ -88,7 +88,13 @@ class JoinedSelector { joinedSelectorsCount_(joinedSelectorsCount), rdxCtx_(rdxCtx), optimized_(false), - inTransaction_{inTransaction} {} + inTransaction_{inTransaction} { +#ifndef NDEBUG + for (const auto &jqe : joinQuery_.joinEntries_) { + assertrx_throw(jqe.FieldsHaveBeenSet()); + } +#endif + } JoinedSelector(JoinedSelector &&) = default; JoinedSelector &operator=(JoinedSelector &&) = delete; @@ -98,7 +104,7 @@ class JoinedSelector { bool Process(IdType, int nsId, ConstPayload, bool match); JoinType Type() const noexcept { return joinType_; } void SetType(JoinType type) noexcept { joinType_ = type; } - const std::string &RightNsName() const noexcept { return itemQuery_._namespace; } + const std::string &RightNsName() const noexcept { return itemQuery_.NsName(); } const JoinedQuery &JoinQuery() const noexcept { return joinQuery_; } int Called() const noexcept { return called_; } int Matched() const noexcept { return matched_; } @@ -109,10 +115,11 @@ class JoinedSelector { const NamespaceImpl::Ptr &RightNs() const noexcept { return rightNs_; } private: - template - void readValuesFromRightNs(VariantArray &values, KeyValueType leftIndexType, int rightIdxNo, std::string_view rightIndex) const; - template - void readValuesFromPreResult(VariantArray &values, KeyValueType leftIndexType, int rightIdxNo, std::string_view rightIndex) const; + [[nodiscard]] VariantArray readValuesFromRightNs(const QueryJoinEntry &) const; + [[nodiscard]] VariantArray readValuesFromPreResult(const QueryJoinEntry &) const; + template + [[nodiscard]] VariantArray readValuesOfRightNsFrom(const Cont &from, const Fn &createPayload, const QueryJoinEntry &, + const PayloadType &) const; void selectFromRightNs(QueryResults &joinItemR, const Query &, bool &found, bool &matchedAtLeastOnce); void selectFromPreResultValues(QueryResults &joinItemR, const Query &, bool &found, bool &matchedAtLeastOnce) const; @@ -134,7 +141,4 @@ class JoinedSelector { }; using JoinedSelectors = std::vector; -extern template void JoinedSelector::readValuesFromPreResult(VariantArray &, KeyValueType, int, std::string_view) const; -extern template void JoinedSelector::readValuesFromPreResult(VariantArray &, KeyValueType, int, std::string_view) const; - } // namespace reindexer diff --git a/cpp_src/core/nsselecter/joinedselectormock.h b/cpp_src/core/nsselecter/joinedselectormock.h index 5db3a49e6..e5cc58ea3 100644 --- a/cpp_src/core/nsselecter/joinedselectormock.h +++ b/cpp_src/core/nsselecter/joinedselectormock.h @@ -17,7 +17,7 @@ class JoinedSelectorMock { public: JoinedSelectorMock(JoinType jt, reindexer::JoinedQuery q) : query_{std::move(q)}, qr_{}, joinType_{jt} {} const reindexer::JoinedQuery& JoinQuery() const noexcept { return query_; } - const std::string& RightNsName() const noexcept { return query_._namespace; } + const std::string& RightNsName() const noexcept { return query_.NsName(); } reindexer::QueryResults& QueryResults() noexcept { return qr_; } const reindexer::QueryResults& 
QueryResults() const noexcept { return qr_; } JoinType Type() const noexcept { return joinType_; } diff --git a/cpp_src/core/nsselecter/nsselecter.cc b/cpp_src/core/nsselecter/nsselecter.cc index 0e7bf7650..69f54074e 100644 --- a/cpp_src/core/nsselecter/nsselecter.cc +++ b/cpp_src/core/nsselecter/nsselecter.cc @@ -1,6 +1,5 @@ #include "nsselecter.h" -#include "core/cjson/jsonbuilder.h" #include "core/namespace/namespaceimpl.h" #include "core/queryresults/joinresults.h" #include "crashqueryreporter.h" @@ -48,13 +47,13 @@ void NsSelecter::operator()(QueryResults &result, SelectCtx &ctx, const RdxConte bool containAggCount = containSomeAggCount(AggCount); bool containAggCountCached = containAggCount ? false : containSomeAggCount(AggCountCached); - bool needCalcTotal = aggregationQueryRef.calcTotal == ModeAccurateTotal || containAggCount; + bool needCalcTotal = aggregationQueryRef.CalcTotal() == ModeAccurateTotal || containAggCount; QueryCacheKey ckey; - if (aggregationQueryRef.calcTotal == ModeCachedTotal || containAggCountCached) { + if (aggregationQueryRef.CalcTotal() == ModeCachedTotal || containAggCountCached) { ckey = QueryCacheKey{ctx.query}; - auto cached = ns_->queryTotalCountCache_->Get(ckey); + auto cached = ns_->queryCountCache_->Get(ckey); if (cached.valid && cached.val.total_count >= 0) { result.totalCount += cached.val.total_count; logPrintf(LogTrace, "[%s] using value from cache: %d", ns_->name_, result.totalCount); @@ -362,10 +361,10 @@ void NsSelecter::operator()(QueryResults &result, SelectCtx &ctx, const RdxConte } } // Put count/count_cached to aggretions - if (aggregationQueryRef.calcTotal != ModeNoTotal || containAggCount || containAggCountCached) { + if (aggregationQueryRef.CalcTotal() != ModeNoTotal || containAggCount || containAggCountCached) { AggregationResult ret; ret.fields = {"*"}; - ret.type = (aggregationQueryRef.calcTotal == ModeAccurateTotal || containAggCount) ? AggCount : AggCountCached; + ret.type = (aggregationQueryRef.CalcTotal() == ModeAccurateTotal || containAggCount) ? 
AggCount : AggCountCached; if (ctx.isMergeQuerySubQuery()) { assertrx_throw(!result.aggregationResults.empty()); auto &agg = result.aggregationResults.back(); @@ -404,7 +403,7 @@ void NsSelecter::operator()(QueryResults &result, SelectCtx &ctx, const RdxConte if (needPutCachedTotal) { logPrintf(LogTrace, "[%s] put totalCount value into query cache: %d ", ns_->name_, result.totalCount); - ns_->queryTotalCountCache_->Put(ckey, {static_cast(result.totalCount - initTotalCount)}); + ns_->queryCountCache_->Put(ckey, {static_cast(result.totalCount - initTotalCount)}); } if (ctx.preResult && ctx.preResult->executionMode == JoinPreResult::ModeBuild) { switch (ctx.preResult->dataMode) { @@ -783,7 +782,7 @@ It NsSelecter::applyForcedSortImpl(NamespaceImpl &ns, It begin, It end, const It // implementation for composite indexes const auto &payloadType = ns.payloadType_; const FieldsSet &fields = ns.indexes_[idx]->Fields(); - unordered_payload_map sortMap(0, payloadType, fields); + unordered_payload_map sortMap(0, PayloadType{payloadType}, FieldsSet{fields}); ForcedMapInserter inserter{sortMap}; for (auto value : forcedSortOrder) { value.convert(fieldType, &payloadType, &fields); @@ -1323,7 +1322,7 @@ void NsSelecter::prepareSortJoinedIndex(size_t nsIdx, std::string_view column, i .FieldByName(std::string{column}, index); if (index == IndexValueType::SetByJsonPath) { skipSortingEntry |= !validateField( - strictMode, column, js.joinQuery_._namespace, + strictMode, column, js.joinQuery_.NsName(), js.preResult_->dataMode == JoinPreResult::ModeValues ? js.preResult_->values.tagsMatcher : js.rightNs_->tagsMatcher_); } } @@ -1565,11 +1564,11 @@ size_t NsSelecter::calculateNormalCost(const QueryEntries &qentries, SelectCtx & [&costCalculator](const JoinQueryEntry &) { costCalculator.MarkInapposite(); }, [&costCalculator](const BetweenFieldsQueryEntry &) { costCalculator.MarkInapposite(); }, [&](const QueryEntry &qe) { - if (qe.idxNo < 0) { + if (!qe.IsFieldIndexed()) { costCalculator.MarkInapposite(); return; } - if (qe.idxNo == ctx.sortingContext.uncommitedIndex) { + if (qe.IndexNo() == ctx.sortingContext.uncommitedIndex) { if (sortIndexSearchState == SortIndexNotFound) { const bool isExpectingIdSet = qentries.GetOperation(i) == OpAnd && (next == sz || qentries.GetOperation(next) != OpOr); @@ -1591,7 +1590,7 @@ size_t NsSelecter::calculateNormalCost(const QueryEntries &qentries, SelectCtx & return; } - auto &index = ns_->indexes_[qe.idxNo]; + auto &index = ns_->indexes_[qe.IndexNo()]; if (IsFullText(index->Type())) { costCalculator.MarkInapposite(); return; @@ -1604,8 +1603,8 @@ size_t NsSelecter::calculateNormalCost(const QueryEntries &qentries, SelectCtx & opts.inTransaction = ctx.inTransaction; try { - SelectKeyResults reslts = index->SelectKey(qe.values, qe.condition, 0, opts, nullptr, rdxCtx); - costCalculator.Add(reslts, qe.idxNo == ctx.sortingContext.uncommitedIndex); + SelectKeyResults reslts = index->SelectKey(qe.Values(), qe.Condition(), 0, opts, nullptr, rdxCtx); + costCalculator.Add(reslts, qe.IndexNo() == ctx.sortingContext.uncommitedIndex); } catch (const Error &) { costCalculator.MarkInapposite(); } @@ -1632,7 +1631,7 @@ size_t NsSelecter::calculateOptimizedCost(size_t costNormal, const QueryEntries [&costCalculator](const JoinQueryEntry &) { costCalculator.MarkInapposite(); }, [&costCalculator](const BetweenFieldsQueryEntry &) { costCalculator.MarkInapposite(); }, [&](const QueryEntry &qe) { - if (qe.idxNo < 0 || qe.idxNo != ctx.sortingContext.uncommitedIndex) { + if (!qe.IsFieldIndexed() || 
qe.IndexNo() != ctx.sortingContext.uncommitedIndex) { costCalculator.MarkInapposite(); return; } @@ -1645,7 +1644,7 @@ size_t NsSelecter::calculateOptimizedCost(size_t costNormal, const QueryEntries opts.inTransaction = ctx.inTransaction; try { - SelectKeyResults reslts = ns_->indexes_[qe.idxNo]->SelectKey(qe.values, qe.condition, 0, opts, nullptr, rdxCtx); + SelectKeyResults reslts = ns_->indexes_[qe.IndexNo()]->SelectKey(qe.Values(), qe.Condition(), 0, opts, nullptr, rdxCtx); costCalculator.Add(reslts); } catch (const Error &) { costCalculator.MarkInapposite(); @@ -1662,7 +1661,7 @@ bool NsSelecter::isSortOptimizatonEffective(const QueryEntries &qentries, Select } if (qentries.Size() == 1 && qentries.HoldsOrReferTo(0)) { const auto &qe = qentries.Get(0); - if (qe.idxNo == ctx.sortingContext.uncommitedIndex) { + if (qe.IndexNo() == ctx.sortingContext.uncommitedIndex) { return SelectIteratorContainer::IsExpectingOrderedResults(qe); } } @@ -1696,8 +1695,8 @@ bool NsSelecter::isSortOptimizatonEffective(const QueryEntries &qentries, Select // TODO: It's possible to evaluate this multiplier, based on the query conditions, but the only way to avoid corner cases is to // allow user to hint this optimization. const size_t limitMultiplier = std::max(size_t(20), size_t(totalItemsCount / expectedMaxIterationsNormal) * 4); - const auto offset = ctx.query.HasOffset() ? ctx.query.start : 1; - costOptimized = limitMultiplier * (ctx.query.count + offset); + const auto offset = ctx.query.HasOffset() ? ctx.query.Offset() : 1; + costOptimized = limitMultiplier * (ctx.query.Limit() + offset); } return costOptimized <= costNormal; } diff --git a/cpp_src/core/nsselecter/nsselecter.h b/cpp_src/core/nsselecter/nsselecter.h index 1a51ab7eb..6d4c87f34 100644 --- a/cpp_src/core/nsselecter/nsselecter.h +++ b/cpp_src/core/nsselecter/nsselecter.h @@ -11,7 +11,7 @@ enum class IsMergeQuery : bool { Yes = true, No = false }; enum class IsFTQuery { Yes, No, NotSet }; struct SelectCtx { - explicit SelectCtx(const Query &query_, const Query *parentQuery_) : query(query_), parentQuery(parentQuery_) {} + explicit SelectCtx(const Query &query_, const Query *parentQuery_) noexcept : query(query_), parentQuery(parentQuery_) {} const Query &query; JoinedSelectors *joinedSelectors = nullptr; SelectFunctionsHolder *functions = nullptr; @@ -28,6 +28,7 @@ struct SelectCtx { bool inTransaction = false; IsMergeQuery isMergeQuery = IsMergeQuery::No; IsFTQuery isFtQuery = IsFTQuery::NotSet; + QueryType crashReporterQueryType = QuerySelect; const Query *parentQuery = nullptr; ExplainCalc explain; @@ -80,7 +81,7 @@ class NsSelecter { void addSelectResult(uint8_t proc, IdType rowId, IdType properRowId, SelectCtx &sctx, h_vector &aggregators, QueryResults &result, bool preselectForFt); - h_vector getAggregators(const std::vector& aggEntrys, StrictMode strictMode) const; + h_vector getAggregators(const std::vector &aggEntrys, StrictMode strictMode) const; void setLimitAndOffset(ItemRefVector &result, size_t offset, size_t limit); void prepareSortingContext(SortingEntries &sortBy, SelectCtx &ctx, bool isFt, bool availableSelectBySortIndex); static void prepareSortIndex(const NamespaceImpl &, std::string_view column, int &index, bool &skipSortingEntry, StrictMode); diff --git a/cpp_src/core/nsselecter/querypreprocessor.cc b/cpp_src/core/nsselecter/querypreprocessor.cc index e7abaf49e..6070eb478 100644 --- a/cpp_src/core/nsselecter/querypreprocessor.cc +++ b/cpp_src/core/nsselecter/querypreprocessor.cc @@ -1,7 +1,6 @@ #include 
"querypreprocessor.h" #include "core/index/index.h" -#include "core/index/indextext/indextext.h" #include "core/namespace/namespaceimpl.h" #include "core/nsselecter/joinedselector.h" #include "core/nsselecter/selectiteratorcontainer.h" @@ -9,7 +8,6 @@ #include "core/payload/fieldsset.h" #include "core/query/dsl/dslencoder.h" #include "core/query/queryentry.h" -#include "estl/overloaded.h" #include "nsselecter.h" #include "qresexplainholder.h" #include "substitutionhelpers.h" @@ -22,25 +20,23 @@ QueryPreprocessor::QueryPreprocessor(QueryEntries &&queries, NamespaceImpl *ns, query_{ctx.query}, strictMode_(ctx.inTransaction ? StrictModeNone : ((query_.strictMode == StrictModeNotSet) ? ns_.config_.strictMode : query_.strictMode)), - start_(query_.start), - count_(query_.count), + start_(query_.Offset()), + count_(query_.Limit()), forcedSortOrder_(!query_.forcedSortOrder_.empty()), reqMatchedOnce_(ctx.reqMatchedOnceFlag) { if (forcedSortOrder_ && (start_ > QueryEntry::kDefaultOffset || count_ < QueryEntry::kDefaultLimit)) { - assertrx(!query_.sortingEntries_.empty()); + assertrx_throw(!query_.sortingEntries_.empty()); static const std::vector emptyJoinedSelectors; const auto &sEntry = query_.sortingEntries_[0]; if (SortExpression::Parse(sEntry.expression, emptyJoinedSelectors).ByIndexField()) { - QueryEntry qe; - qe.values.reserve(query_.forcedSortOrder_.size()); - for (const auto &v : query_.forcedSortOrder_) qe.values.push_back(v); - qe.condition = query_.forcedSortOrder_.size() == 1 ? CondEq : CondSet; - qe.index = sEntry.expression; - if (!ns_.getIndexByNameOrJsonPath(qe.index, qe.idxNo)) { - qe.idxNo = IndexValueType::SetByJsonPath; - } + VariantArray values; + values.reserve(query_.forcedSortOrder_.size()); + for (const auto &v : query_.forcedSortOrder_) values.push_back(v); desc_ = sEntry.desc; - Append(desc_ ? OpNot : OpAnd, std::move(qe)); + QueryField fld{sEntry.expression}; + SetQueryField(fld, ns_); + Append(desc_ ? OpNot : OpAnd, std::move(fld), query_.forcedSortOrder_.size() == 1 ? CondEq : CondSet, + std::move(values)); queryEntryAddedByForcedSortOptimization_ = true; } } @@ -58,9 +54,8 @@ void QueryPreprocessor::ExcludeFtQuery(const RdxContext &rdxCtx) { if (queryEntryAddedByForcedSortOptimization_ || Size() <= 1) return; for (auto it = begin(), next = it, endIt = end(); it != endIt; it = next) { ++next; - if (it->HoldsOrReferTo() && it->Value().idxNo != IndexValueType::SetByJsonPath) { - const auto indexNo = it->Value().idxNo; - auto &index = ns_.indexes_[indexNo]; + if (it->HoldsOrReferTo() && it->Value().IsFieldIndexed()) { + auto &index = ns_.indexes_[it->Value().IndexNo()]; if (!IsFastFullText(index->Type())) continue; if (it->operation != OpAnd || (next != endIt && next->operation == OpOr) || !index->EnablePreselectBeforeFt()) break; ftPreselect_ = index->FtPreselect(rdxCtx); @@ -80,16 +75,16 @@ bool QueryPreprocessor::NeedNextEvaluation(unsigned start, unsigned count, bool if (evaluationsCount_++) return false; if (queryEntryAddedByForcedSortOptimization_) { container_.back().operation = desc_ ? 
OpAnd : OpNot; - assertrx(start <= start_); + assertrx_throw(start <= start_); start_ = start; - assertrx(count <= count_); + assertrx_throw(count <= count_); count_ = count; return count_ || (reqMatchedOnce_ && !matchedAtLeastOnce); } else if (ftEntry_) { if (!matchedAtLeastOnce) return false; qresHolder.BackupContainer(); - start_ = query_.start; - count_ = query_.count; + start_ = query_.Offset(); + count_ = query_.Limit(); forcedSortOrder_ = !query_.forcedSortOrder_.empty(); clear(); Append(OpAnd, std::move(*ftEntry_)); @@ -101,19 +96,19 @@ bool QueryPreprocessor::NeedNextEvaluation(unsigned start, unsigned count, bool return false; } -void QueryPreprocessor::checkStrictMode(const std::string &index, int idxNo) const { - if (idxNo != IndexValueType::SetByJsonPath) return; +void QueryPreprocessor::checkStrictMode(const QueryField &field) const { + if (field.IsFieldIndexed()) return; switch (strictMode_) { case StrictModeIndexes: - throw Error(errParams, + throw Error(errQueryExec, "Current query strict mode allows filtering by indexes only. There are no indexes with name '%s' in namespace '%s'", - index, ns_.name_); + field.FieldName(), ns_.name_); case StrictModeNames: - if (ns_.tagsMatcher_.path2tag(index).empty()) { - throw Error(errParams, + if (field.HaveEmptyField()) { + throw Error(errQueryExec, "Current query strict mode allows filtering by existing fields only. There are no fields with name '%s' in " "namespace '%s'", - index, ns_.name_); + field.FieldName(), ns_.name_); } case StrictModeNotSet: case StrictModeNone: @@ -151,7 +146,7 @@ bool QueryPreprocessor::removeBrackets() { bool QueryPreprocessor::canRemoveBracket(size_t i) const { if (Size(i) < 2) { - throw Error{errParams, "Bracket cannot be empty"}; + throw Error{errQueryExec, "Bracket cannot be empty"}; } const size_t next = Next(i); const OpType op = GetOperation(i); @@ -162,7 +157,7 @@ bool QueryPreprocessor::canRemoveBracket(size_t i) const { size_t QueryPreprocessor::removeBrackets(size_t begin, size_t end) { if (begin != end && GetOperation(begin) == OpOr) { - throw Error{errParams, "First condition cannot be with operation OR"}; + throw Error{errQueryExec, "OR operator in first condition or after left join"}; } size_t deleted = 0; for (size_t i = begin; i < end - deleted; i = Next(i)) { @@ -183,31 +178,23 @@ void QueryPreprocessor::InitIndexNumbers() { ExecuteAppropriateForEach( Skip{}, [this](QueryEntry &entry) { - if (entry.idxNo == IndexValueType::NotSet) { - if (!ns_.getIndexByNameOrJsonPath(entry.index, entry.idxNo)) { - entry.idxNo = IndexValueType::SetByJsonPath; - } + if (!entry.FieldsHaveBeenSet()) { + SetQueryField(entry.FieldData(), ns_); } - checkStrictMode(entry.index, entry.idxNo); + checkStrictMode(entry.FieldData()); }, [this](BetweenFieldsQueryEntry &entry) { - if (entry.firstIdxNo == IndexValueType::NotSet) { - if (!ns_.getIndexByNameOrJsonPath(entry.firstIndex, entry.firstIdxNo)) { - entry.firstIdxNo = IndexValueType::SetByJsonPath; - } - } - checkStrictMode(entry.firstIndex, entry.firstIdxNo); - if (entry.secondIdxNo == IndexValueType::NotSet) { - if (!ns_.getIndexByNameOrJsonPath(entry.secondIndex, entry.secondIdxNo)) { - entry.secondIdxNo = IndexValueType::SetByJsonPath; - } + if (!entry.FieldsHaveBeenSet()) { + SetQueryField(entry.LeftFieldData(), ns_); + SetQueryField(entry.RightFieldData(), ns_); } - checkStrictMode(entry.secondIndex, entry.secondIdxNo); + checkStrictMode(entry.LeftFieldData()); + checkStrictMode(entry.RightFieldData()); }); } size_t 
QueryPreprocessor::lookupQueryIndexes(uint16_t dst, uint16_t srcBegin, uint16_t srcEnd) { - assertrx(dst <= srcBegin); + assertrx_throw(dst <= srcBegin); h_vector iidx(kMaxIndexes, uint16_t(0)); size_t merged = 0; for (size_t src = srcBegin, nextSrc; src < srcEnd; src = nextSrc) { @@ -221,17 +208,16 @@ size_t QueryPreprocessor::lookupQueryIndexes(uint16_t dst, uint16_t srcBegin, ui return true; }, [&](QueryEntry &entry) { - const bool isIndexField = (entry.idxNo >= 0); - if (isIndexField) { + if (entry.IsFieldIndexed()) { // try merge entries with AND opetator if ((GetOperation(src) == OpAnd) && (nextSrc >= srcEnd || GetOperation(nextSrc) != OpOr)) { - if (size_t(entry.idxNo) >= iidx.size()) { + if (size_t(entry.IndexNo()) >= iidx.size()) { const auto oldSize = iidx.size(); - iidx.resize(size_t(entry.idxNo) + 1); + iidx.resize(entry.IndexNo() + 1); std::fill(iidx.begin() + oldSize, iidx.begin() + iidx.size(), 0); } - auto &iidxRef = iidx[entry.idxNo]; - if (iidxRef > 0 && !ns_.indexes_[entry.idxNo]->Opts().IsArray()) { + auto &iidxRef = iidx[entry.IndexNo()]; + if (iidxRef > 0 && !ns_.indexes_[entry.IndexNo()]->Opts().IsArray()) { if (mergeQueryEntries(iidxRef - 1, src)) { ++merged; return false; @@ -265,9 +251,9 @@ size_t QueryPreprocessor::lookupQueryIndexes(uint16_t dst, uint16_t srcBegin, ui void QueryPreprocessor::CheckUniqueFtQuery() const { bool found = false; ExecuteAppropriateForEach(Skip{}, [&](const QueryEntry &qe) { - if (qe.idxNo != IndexValueType::SetByJsonPath && IsFullText(ns_.indexes_[qe.idxNo]->Type())) { + if (qe.IsFieldIndexed() && IsFullText(ns_.indexes_[qe.IndexNo()]->Type())) { if (found) { - throw Error{errParams, "Query cannot contain more than one full text condition"}; + throw Error{errQueryExec, "Query cannot contain more than one full text condition"}; } else { found = true; } @@ -277,8 +263,8 @@ void QueryPreprocessor::CheckUniqueFtQuery() const { bool QueryPreprocessor::ContainsFullTextIndexes() const { for (auto it = cbegin().PlainIterator(), end = cend().PlainIterator(); it != end; ++it) { - if (it->HoldsOrReferTo() && it->Value().idxNo != IndexValueType::SetByJsonPath && - IsFullText(ns_.indexes_[it->Value().idxNo]->Type())) { + if (it->HoldsOrReferTo() && it->Value().IsFieldIndexed() && + IsFullText(ns_.indexes_[it->Value().IndexNo()]->Type())) { return true; } } @@ -321,11 +307,14 @@ static void createCompositeKeyValues(const h_vector } } -static void createCompositeKeyValues(const h_vector, 4> &values, const PayloadType &plType, - VariantArray &ret) { +static VariantArray createCompositeKeyValues(const h_vector, 4> &values, const PayloadType &plType, + uint32_t resultSetSize) { PayloadValue d(plType.TotalSize()); Payload pl(plType, d); + VariantArray ret; + ret.reserve(resultSetSize); createCompositeKeyValues(values, plType, pl, ret, 0); + return ret; } size_t QueryPreprocessor::substituteCompositeIndexes(const size_t from, const size_t to) { @@ -350,15 +339,16 @@ size_t QueryPreprocessor::substituteCompositeIndexes(const size_t from, const si continue; } auto &qe = Get(cur); - if ((qe.condition != CondEq && qe.condition != CondSet) || qe.idxNo >= ns_.payloadType_.NumFields() || qe.idxNo < 0) { + if ((qe.Condition() != CondEq && qe.Condition() != CondSet) || !qe.IsFieldIndexed() || + qe.IndexNo() >= ns_.payloadType_.NumFields()) { continue; } - const std::vector *found = getCompositeIndex(qe.idxNo); + const std::vector *found = getCompositeIndex(qe.IndexNo()); if (!found || found->empty()) { continue; } - searcher.Add(qe.idxNo, *found, cur); + 
searcher.Add(qe.IndexNo(), *found, cur); } EntriesRanges deleteRanges; @@ -371,14 +361,13 @@ size_t QueryPreprocessor::substituteCompositeIndexes(const size_t from, const si uint32_t maxSetSize = 0; for (auto i : res.entries) { auto &qe = Get(i); - if rx_unlikely (!res.fields.contains(qe.idxNo)) { + if rx_unlikely (!res.fields.contains(qe.IndexNo())) { throw Error(errLogic, "Error during composite index's fields substitution (this should not happen)"); } - - maxSetSize = std::max(maxSetSize, qe.values.size()); - resultSetSize = (resultSetSize == 0) ? qe.values.size() : (resultSetSize * qe.values.size()); + maxSetSize = std::max(maxSetSize, qe.Values().size()); + resultSetSize = (resultSetSize == 0) ? qe.Values().size() : (resultSetSize * qe.Values().size()); } - static const CompositeValuesCountLimits kCompositeSetLimits; + constexpr static CompositeValuesCountLimits kCompositeSetLimits; if (resultSetSize != maxSetSize) { // Do not perform substitution if result set size becoms larger than initial indexes set size // and this size is greater than limit @@ -390,22 +379,17 @@ size_t QueryPreprocessor::substituteCompositeIndexes(const size_t from, const si } for (auto i : res.entries) { auto &qe = Get(i); - const auto idxKeyType = ns_.indexes_[qe.idxNo]->KeyType(); - for (auto &v : qe.values) { - v.convert(idxKeyType); - } - values.emplace_back(qe.idxNo, std::move(qe.values)); + qe.ConvertValuesToFieldType(); + const int idxNo = qe.IndexNo(); + values.emplace_back(idxNo, std::move(qe).Values()); } { - QueryEntry ce(CondSet, ns_.indexes_[res.idx]->Name(), res.idx); - ce.values.reserve(resultSetSize); - createCompositeKeyValues(values, ns_.payloadType_, ce.values); - if (ce.values.size() == 1) { - ce.condition = CondEq; - } + VariantArray qValues = createCompositeKeyValues(values, ns_.payloadType_, resultSetSize); const auto first = res.entries.front(); SetOperation(OpAnd, first); - container_[first].SetValue(std::move(ce)); + QueryField fld{ns_.indexes_[res.idx]->Name()}; + setQueryIndex(fld, res.idx, ns_); + container_[first].Emplace(std::move(fld), qValues.size() == 1 ? 
CondEq : CondSet, std::move(qValues)); } deleteRanges.Add(span(res.entries.data() + 1, res.entries.size() - 1)); resIdx = searcher.RemoveUsedAndGetNext(resIdx); @@ -417,29 +401,14 @@ size_t QueryPreprocessor::substituteCompositeIndexes(const size_t from, const si return deleted; } -void QueryPreprocessor::convertWhereValues(QueryEntry *qe) const { - const FieldsSet *fields = nullptr; - KeyValueType keyType{KeyValueType::Undefined{}}; - const bool isIndexField = (qe->idxNo != IndexValueType::SetByJsonPath); - if (isIndexField) { - keyType = ns_.indexes_[qe->idxNo]->SelectKeyType(); - fields = &ns_.indexes_[qe->idxNo]->Fields(); - } - if (!keyType.Is()) { - if (qe->condition != CondDWithin) { - for (auto &key : qe->values) { - key.convert(keyType, &ns_.payloadType_, fields); - } - } - } -} +void QueryPreprocessor::convertWhereValues(QueryEntry &qe) const { qe.ConvertValuesToFieldType(ns_.payloadType_); } void QueryPreprocessor::convertWhereValues(QueryEntries::iterator begin, QueryEntries::iterator end) const { for (auto it = begin; it != end; ++it) { it->InvokeAppropriate( Skip{}, [this, &it](const QueryEntriesBracket &) { convertWhereValues(it.begin(), it.end()); }, - [this](QueryEntry &qe) { convertWhereValues(&qe); }); + [this](QueryEntry &qe) { convertWhereValues(qe); }); } } @@ -481,12 +450,12 @@ void QueryPreprocessor::findMaxIndex(QueryEntries::const_iterator begin, QueryEn return FoundIndexInfo(); }, [this](const QueryEntry &entry) -> FoundIndexInfo { - if (entry.idxNo != IndexValueType::SetByJsonPath && !entry.distinct) { - const auto idxPtr = ns_.indexes_[entry.idxNo].get(); + if (entry.IsFieldIndexed() && !entry.Distinct()) { + const auto idxPtr = ns_.indexes_[entry.IndexNo()].get(); if (idxPtr->IsOrdered() && !idxPtr->Opts().IsArray()) { - if (IsOrderedCondition(entry.condition)) { + if (IsOrderedCondition(entry.Condition())) { return FoundIndexInfo{idxPtr, FoundIndexInfo::ConditionType::Compatible}; - } else if (entry.condition == CondAny || entry.values.size() > 1) { + } else if (entry.Condition() == CondAny || entry.Values().size() > 1) { return FoundIndexInfo{idxPtr, FoundIndexInfo::ConditionType::Incompatible}; } } @@ -510,42 +479,42 @@ void QueryPreprocessor::findMaxIndex(QueryEntries::const_iterator begin, QueryEn bool QueryPreprocessor::mergeQueryEntries(size_t lhs, size_t rhs) { QueryEntry *lqe = &Get(lhs); QueryEntry &rqe = Get(rhs); - if ((lqe->condition == CondEq || lqe->condition == CondSet) && (rqe.condition == CondEq || rqe.condition == CondSet)) { + if ((lqe->Condition() == CondEq || lqe->Condition() == CondSet) && (rqe.Condition() == CondEq || rqe.Condition() == CondSet)) { // intersect 2 queryentries on the same index - if rx_unlikely (lqe->values.empty()) { + if rx_unlikely (lqe->Values().empty()) { return true; } if (container_[lhs].IsRef()) { container_[lhs].SetValue(const_cast(*lqe)); lqe = &Get(lhs); } + const bool distinct = lqe->Distinct() || rqe.Distinct(); VariantArray setValues; - if (rx_likely(!rqe.values.empty())) { - convertWhereValues(lqe); - convertWhereValues(&rqe); - VariantArray *first = &lqe->values; - VariantArray *second = &rqe.values; - if (lqe->values.size() > rqe.values.size()) { - std::swap(first, second); - } - setValues.reserve(first->size()); + if (rx_likely(!rqe.Values().empty())) { + convertWhereValues(*lqe); + convertWhereValues(rqe); + auto &&[first, second] = lqe->Values().size() < rqe.Values().size() + ? 
std::make_pair(std::move(*lqe).Values(), std::move(rqe).Values()) + : std::make_pair(std::move(rqe).Values(), std::move(*lqe).Values()); + + setValues.reserve(first.size()); constexpr size_t kMinArraySizeToUseHashSet = 250; - if (second->size() < kMinArraySizeToUseHashSet) { + if (second.size() < kMinArraySizeToUseHashSet) { // Intersect via binary search + sort for small vectors - std::sort(first->begin(), first->end()); - for (auto &&v : *second) { - if (std::binary_search(first->begin(), first->end(), v)) { + std::sort(first.begin(), first.end()); + for (auto &&v : second) { + if (std::binary_search(first.begin(), first.end(), v)) { setValues.emplace_back(std::move(v)); } } } else { // Intersect via hash_set for large vectors reindexer::fast_hash_set set; - set.reserve(first->size() * 2); - for (auto &&v : *first) { + set.reserve(first.size() * 2); + for (auto &&v : first) { set.emplace(std::move(v)); } - for (auto &&v : *second) { + for (auto &&v : second) { if (set.erase(v)) { setValues.emplace_back(std::move(v)); } @@ -553,27 +522,26 @@ bool QueryPreprocessor::mergeQueryEntries(size_t lhs, size_t rhs) { } } - lqe->values = std::move(setValues); - lqe->condition = (lqe->values.size() == 1) ? CondEq : CondSet; - lqe->distinct |= rqe.distinct; + lqe->SetCondAndValues(CondSet, std::move(setValues)); + lqe->Distinct(distinct); return true; - } else if (rqe.condition == CondAny) { - if (!lqe->distinct && rqe.distinct) { + } else if (rqe.Condition() == CondAny) { + if (!lqe->Distinct() && rqe.Distinct()) { if (container_[lhs].IsRef()) { container_[lhs].SetValue(const_cast(*lqe)); lqe = &Get(lhs); } - lqe->distinct = true; + lqe->Distinct(true); } return true; - } else if (lqe->condition == CondAny) { - const bool distinct = lqe->distinct || rqe.distinct; + } else if (lqe->Condition() == CondAny) { + const bool distinct = lqe->Distinct() || rqe.Distinct(); if (container_[rhs].IsRef()) { container_[lhs].SetValue(const_cast(rqe)); } else { container_[lhs].SetValue(std::move(rqe)); } - Get(lhs).distinct = distinct; + Get(lhs).Distinct(distinct); return true; } @@ -584,19 +552,16 @@ void QueryPreprocessor::AddDistinctEntries(const h_vector &aggreg bool wasAdded = false; for (auto &ag : aggregators) { if (ag.Type() != AggDistinct) continue; - QueryEntry qe; - assertrx(ag.Names().size() == 1); - qe.index = ag.Names()[0]; - qe.condition = CondAny; - qe.distinct = true; - Append(wasAdded ? OpOr : OpAnd, std::move(qe)); + assertrx_throw(ag.Names().size() == 1); + Append(wasAdded ? 
OpOr : OpAnd, ag.Names()[0], QueryEntry::DistinctTag{}); wasAdded = true; } } -void QueryPreprocessor::fillQueryEntryFromOnCondition(QueryEntry &queryEntry, std::string &explainStr, AggType &oAggType, - NamespaceImpl &rightNs, Query joinQuery, std::string joinIndex, CondType condition, - KeyValueType valuesType, const RdxContext &rdxCtx) { +std::pair QueryPreprocessor::queryValuesFromOnCondition(std::string &explainStr, AggType &oAggType, + NamespaceImpl &rightNs, Query joinQuery, + const QueryJoinEntry &joinEntry, CondType condition, + const RdxContext &rdxCtx) { size_t limit; const auto &rNsCfg = rightNs.Config(); if (rNsCfg.maxPreselectSize == 0) { @@ -608,25 +573,25 @@ void QueryPreprocessor::fillQueryEntryFromOnCondition(QueryEntry &queryEntry, st std::min(std::max(rNsCfg.minPreselectSize, rightNs.ItemsCount() * rNsCfg.maxPreselectPart), rNsCfg.maxPreselectSize); } joinQuery.explain_ = query_.explain_; - joinQuery.count = limit + 2; - joinQuery.start = 0; + joinQuery.Limit(limit + 2); + joinQuery.Offset(QueryEntry::kDefaultOffset); joinQuery.sortingEntries_.clear(); joinQuery.forcedSortOrder_.clear(); joinQuery.aggregations_.clear(); switch (condition) { case CondEq: case CondSet: - joinQuery.Distinct(std::move(joinIndex)); + joinQuery.Distinct(joinEntry.RightFieldName()); oAggType = AggType::AggDistinct; break; case CondLt: case CondLe: - joinQuery.Aggregate(AggMax, {std::move(joinIndex)}); + joinQuery.Aggregate(AggMax, {joinEntry.RightFieldName()}); oAggType = AggType::AggMax; break; case CondGt: case CondGe: - joinQuery.Aggregate(AggMin, {std::move(joinIndex)}); + joinQuery.Aggregate(AggMin, {joinEntry.RightFieldName()}); oAggType = AggType::AggMin; break; case CondAny: @@ -635,104 +600,87 @@ void QueryPreprocessor::fillQueryEntryFromOnCondition(QueryEntry &queryEntry, st case CondEmpty: case CondLike: case CondDWithin: - throw Error(errParams, "Unsupported condition in ON statment: %s", CondTypeToStr(condition)); + throw Error(errQueryExec, "Unsupported condition in ON statment: %s", CondTypeToStr(condition)); } SelectCtx ctx{joinQuery, nullptr}; QueryResults qr; rightNs.Select(qr, ctx, rdxCtx); - if (qr.Count() > limit) return; - assertrx(qr.aggregationResults.size() == 1); + if (qr.Count() > limit) return {CondAny, {}}; + assertrx_throw(qr.aggregationResults.size() == 1); + auto &aggRes = qr.aggregationResults[0]; explainStr = qr.explainResults; switch (condition) { case CondEq: case CondSet: { - assertrx(qr.aggregationResults[0].type == AggDistinct); - queryEntry.values.reserve(qr.aggregationResults[0].distincts.size()); - assertrx(qr.aggregationResults[0].distinctsFields.size() == 1); - const auto field = qr.aggregationResults[0].distinctsFields[0]; - for (Variant &distValue : qr.aggregationResults[0].distincts) { + assertrx_throw(aggRes.type == AggDistinct); + VariantArray values; + values.reserve(aggRes.distincts.size()); + for (Variant &distValue : aggRes.distincts) { if (distValue.Type().Is()) { - ConstPayload pl(qr.aggregationResults[0].payloadType, distValue.operator const PayloadValue &()); - VariantArray v; - if (field == IndexValueType::SetByJsonPath) { - assertrx(qr.aggregationResults[0].distinctsFields.getTagsPathsLength() == 1); - pl.GetByJsonPath(qr.aggregationResults[0].distinctsFields.getTagsPath(0), v, valuesType); - } else { - pl.Get(field, v); - } - assertrx(v.size() == 1); - queryEntry.values.emplace_back(std::move(v[0])); + ConstPayload pl(aggRes.payloadType, distValue.operator const PayloadValue &()); + 
values.emplace_back(pl.GetComposite(aggRes.distinctsFields, joinEntry.RightCompositeFieldsTypes())); } else { - queryEntry.values.emplace_back(std::move(distValue)); + values.emplace_back(std::move(distValue)); } } - queryEntry.condition = (queryEntry.values.size() == 1) ? CondEq : CondSet; - break; + return {CondSet, std::move(values)}; } case CondLt: case CondLe: case CondGt: case CondGe: - if (auto value = qr.aggregationResults[0].GetValue()) { - queryEntry.condition = condition; - queryEntry.values.emplace_back(*value); + if (auto value = aggRes.GetValue()) { + return {condition, {Variant{*value}}}; + } else { + return {CondAny, {}}; } - break; case CondAny: case CondRange: case CondAllSet: case CondEmpty: case CondLike: case CondDWithin: - throw Error(errParams, "Unsupported condition in ON statment: %s", CondTypeToStr(condition)); + default: + throw Error(errQueryExec, "Unsupported condition in ON statment: %s", CondTypeToStr(condition)); } } -template -void QueryPreprocessor::fillQueryEntryFromOnCondition(QueryEntry &queryEntry, std::string_view joinIndex, CondType condition, - const JoinedSelector &joinedSelector, KeyValueType valuesType, const int rightIdxNo, - const CollateOpts &collate) { - JoinPreResult::Values &values = joinedSelector.preResult_->values; +std::pair QueryPreprocessor::queryValuesFromOnCondition(CondType condition, const QueryJoinEntry &joinEntry, + const JoinedSelector &joinedSelector, + const CollateOpts &collate) { switch (condition) { case CondEq: - case CondSet: { - joinedSelector.readValuesFromPreResult(queryEntry.values, valuesType, rightIdxNo, joinIndex); - queryEntry.condition = (queryEntry.values.size() == 1) ? CondEq : CondSet; - return; - } + case CondSet: + return {CondSet, joinedSelector.readValuesFromPreResult(joinEntry)}; case CondLt: case CondLe: case CondGt: case CondGe: { - queryEntry.condition = condition; - VariantArray buffer; + const JoinPreResult::Values &values = joinedSelector.preResult_->values; + VariantArray buffer, keyValues; for (const ItemRef &item : values) { - buffer.clear(); - assertrx(!item.Value().IsFree()); + assertrx_throw(!item.Value().IsFree()); const ConstPayload pl{values.payloadType, item.Value()}; - if constexpr (byJsonPath) { - pl.GetByJsonPath(joinIndex, values.tagsMatcher, buffer, valuesType); - } else { - pl.Get(rightIdxNo, buffer); - } + pl.GetByFieldsSet(joinEntry.RightFields(), buffer, joinEntry.RightFieldType(), joinEntry.RightCompositeFieldsTypes()); for (Variant &v : buffer) { - if (queryEntry.values.empty()) { - queryEntry.values.emplace_back(std::move(v)); + if (keyValues.empty()) { + keyValues.emplace_back(std::move(v)); } else { - const auto cmp = queryEntry.values[0].Compare(v, collate); + const auto cmp = keyValues[0].Compare(v, collate); if (condition == CondLt || condition == CondLe) { if (cmp < 0) { - queryEntry.values[0] = std::move(v); + keyValues[0] = std::move(v); } } else { if (cmp > 0) { - queryEntry.values[0] = std::move(v); + keyValues[0] = std::move(v); } } } } } + return {condition, std::move(keyValues)}; } break; case CondAny: case CondRange: @@ -740,7 +688,8 @@ void QueryPreprocessor::fillQueryEntryFromOnCondition(QueryEntry &queryEntry, st case CondEmpty: case CondLike: case CondDWithin: - throw Error(errParams, "Unsupported condition in ON statment: %s", CondTypeToStr(condition)); + default: + throw Error(errQueryExec, "Unsupported condition in ON statment: %s", CondTypeToStr(condition)); } } @@ -782,8 +731,7 @@ size_t QueryPreprocessor::injectConditionsFromJoins(size_t from, size_t 
to, Join assertrx_throw(to <= container_.size()); }, [&](const JoinQueryEntry &jqe) { - assertrx(js.size() > jqe.joinIndex); - + assertrx_throw(js.size() > jqe.joinIndex); JoinedSelector &joinedSelector = js[jqe.joinIndex]; const bool byValues = joinedSelector.PreResult() && joinedSelector.PreResult()->dataMode == JoinPreResult::ModeValues; @@ -803,14 +751,13 @@ size_t QueryPreprocessor::injectConditionsFromJoins(size_t from, size_t to, Join return; } } - const auto &joinEntries = joinedSelector.joinQuery_.joinEntries_; // LeftJoin-s shall not be in QueryEntries container_ by construction - assertrx(joinedSelector.Type() == InnerJoin || joinedSelector.Type() == OrInnerJoin); + assertrx_throw(joinedSelector.Type() == InnerJoin || joinedSelector.Type() == OrInnerJoin); // Checking if we have anything to inject into main Where clause bool foundANDOrOR = false; for (const auto &je : joinEntries) { - if (je.op_ != OpNot) { + if (je.Operation() != OpNot) { foundANDOrOR = true; break; } @@ -822,7 +769,7 @@ size_t QueryPreprocessor::injectConditionsFromJoins(size_t from, size_t to, Join OpType op = GetOperation(cur); if (joinedSelector.Type() == OrInnerJoin) { - if (op == OpNot) throw Error(errParams, "OR INNER JOIN with operation NOT"); + if (op == OpNot) throw Error(errQueryExec, "OR INNER JOIN with operation NOT"); op = OpOr; joinedSelector.SetType(InnerJoin); } @@ -832,6 +779,8 @@ size_t QueryPreprocessor::injectConditionsFromJoins(size_t from, size_t to, Join // !!!Warning jqe reference will be invalidated after EncloseInBracket EncloseInBracket(cur, cur + 1, op); ++cur; + ++to; + ++injectedCount; explainJoinOn.ReserveOnEntries(joinEntries.size()); @@ -840,12 +789,10 @@ size_t QueryPreprocessor::injectConditionsFromJoins(size_t from, size_t to, Join size_t orChainLength = 0; for (size_t i = 0, s = joinEntries.size(); i < s; ++i) { const QueryJoinEntry &joinEntry = joinEntries[i]; - auto explainEntry = explainJoinOn.AppendOnEntryExplain(); explainEntry.InitialCondition(joinEntry, joinedSelector); - - CondType condition = joinEntry.condition_; - OpType operation = joinEntry.op_; + CondType condition = joinEntry.Condition(); + OpType operation = joinEntry.Operation(); switch (operation) { case OpNot: orChainLength = 0; @@ -873,7 +820,7 @@ size_t QueryPreprocessor::injectConditionsFromJoins(size_t from, size_t to, Join case CondEmpty: case CondLike: case CondDWithin: - throw Error(errParams, "Unsupported condition in ON statment: %s", CondTypeToStr(condition)); + throw Error(errQueryExec, "Unsupported condition in ON statment: %s", CondTypeToStr(condition)); } operation = OpAnd; break; @@ -888,67 +835,67 @@ size_t QueryPreprocessor::injectConditionsFromJoins(size_t from, size_t to, Join orChainLength = 0; break; } - - QueryEntry newEntry; - newEntry.index = joinEntry.index_; - newEntry.idxNo = IndexValueType::SetByJsonPath; - KeyValueType valuesType = KeyValueType::Undefined{}; - CollateOpts collate; - if (ns_.getIndexByNameOrJsonPath(newEntry.index, newEntry.idxNo)) { - const Index &index = *ns_.indexes_[newEntry.idxNo]; - valuesType = index.SelectKeyType(); - collate = index.Opts().collateOpts_; - } - + CondType queryCondition{CondAny}; + VariantArray values; if (byValues) { - assertrx(joinedSelector.itemQuery_.entries.HoldsOrReferTo(i)); - const QueryEntry &qe = joinedSelector.itemQuery_.entries.Get(i); - assertrx(qe.index == joinEntry.joinIndex_); - const int rightIdxNo = qe.idxNo; - if (rightIdxNo == IndexValueType::SetByJsonPath) { - fillQueryEntryFromOnCondition(newEntry, 
joinEntry.joinIndex_, condition, joinedSelector, valuesType, - rightIdxNo, collate); - } else { - fillQueryEntryFromOnCondition(newEntry, joinEntry.joinIndex_, condition, joinedSelector, valuesType, - rightIdxNo, collate); + assertrx_throw(joinedSelector.itemQuery_.entries.HoldsOrReferTo(i)); + assertrx_throw(joinedSelector.itemQuery_.entries.Get(i).FieldName() == joinEntry.RightFieldName()); + CollateOpts collate; + if (joinEntry.IsLeftFieldIndexed()) { + collate = ns_.indexes_[joinEntry.LeftIdxNo()]->Opts().collateOpts_; } + std::tie(queryCondition, values) = queryValuesFromOnCondition(condition, joinEntry, joinedSelector, collate); } else { bool skip = false; switch (condition) { + case CondAny: + case CondEmpty: + case CondLike: + case CondDWithin: + explainEntry.Skipped("Skipped due to unsupperted on condition"sv); + skip = true; + break; + case CondRange: case CondLt: case CondLe: case CondGt: - case CondGe: { - const QueryEntry &qe = joinedSelector.itemQuery_.entries.Get(i); - skip = qe.idxNo != IndexValueType::SetByJsonPath && joinedSelector.RightNs()->indexes_[qe.idxNo]->IsUuid(); - if (skip) { - explainEntry.Skipped("Skipped due to condition Lt|Le|Gt|Ge with UUID index field."sv); - } + case CondGe: + joinedSelector.itemQuery_.entries.Get(i).FieldType().EvaluateOneOf( + [&skip, + &explainEntry](OneOf) noexcept { + skip = true; + explainEntry.Skipped( + "Skipped due to condition Lt|Le|Gt|Ge|Range with not indexed or not numeric field."sv); + }, + [](OneOf) noexcept { + }); break; - } case CondEq: case CondSet: case CondAllSet: - case CondAny: - case CondEmpty: - case CondRange: - case CondLike: - case CondDWithin: + joinedSelector.itemQuery_.entries.Get(i).FieldType().EvaluateOneOf( + [&skip, &explainEntry](OneOf) noexcept { + skip = true; + explainEntry.Skipped("Skipped due to condition Eq|Set|AllSet with composite index."sv); + }, + [](OneOf) noexcept {}); break; } if (!skip) { std::string explainSelect; AggType selectAggType; - fillQueryEntryFromOnCondition(newEntry, explainSelect, selectAggType, *joinedSelector.RightNs(), - joinedSelector.JoinQuery(), joinEntry.joinIndex_, condition, valuesType, rdxCtx); + std::tie(queryCondition, values) = + queryValuesFromOnCondition(explainSelect, selectAggType, *joinedSelector.RightNs(), + joinedSelector.JoinQuery(), joinEntry, condition, rdxCtx); explainEntry.ExplainSelect(std::move(explainSelect), selectAggType); } } - - if (!newEntry.values.empty()) { - explainEntry.Succeed(newEntry); - - Insert(cur, operation, std::move(newEntry)); + if (!values.empty()) { + Insert(cur, operation, QueryEntry{QueryField(joinEntry.LeftFieldData()), queryCondition, std::move(values)}); + explainEntry.Succeed(Get(cur)); ++cur; ++count; prevIsSkipped = false; @@ -956,6 +903,7 @@ size_t QueryPreprocessor::injectConditionsFromJoins(size_t from, size_t to, Join explainEntry.Skipped("Skipped as cannot obtain values from right namespace."sv); if (operation == OpOr) { Erase(cur - orChainLength, cur); + cur -= orChainLength; count -= orChainLength; // Marking On-injections as fail for removed entries. 
explainJoinOn.FailOnEntriesAsOrChain(orChainLength); @@ -971,8 +919,10 @@ size_t QueryPreprocessor::injectConditionsFromJoins(size_t from, size_t to, Join [this, cur, count, &js](WrSerializer &ser) { briefDump(cur - count, Next(cur - count), js, ser); }); ++cur; - injectedCount += count + 2; - to += count + 2; + injectedCount += count + 1; + to += count + 1; + } else { + explainJoinOn.Skipped("Skipped as there are no injected conditions"); } }); } @@ -1020,7 +970,7 @@ class JoinOnExplainEnabled { explainEntry_.succeed = true; explainEntry_.reason = ""; explainEntry_.newCond = newEntry.DumpBrief(); - explainEntry_.valuesCount = newEntry.values.size(); + explainEntry_.valuesCount = newEntry.Values().size(); } void Skipped(std::string_view reason) noexcept { @@ -1090,4 +1040,34 @@ class JoinOnExplainEnabled { time_point_t startTime_; }; +void QueryPreprocessor::setQueryIndex(QueryField &qField, int idxNo, const NamespaceImpl &ns) { + const auto &idx = *ns.indexes_[idxNo]; + std::vector compositeFieldsTypes; + if (idxNo >= ns.indexes_.firstCompositePos()) { +#ifndef NDEBUG + const bool ftIdx = IsFullText(idx.Type()); +#endif + for (const auto f : ns.indexes_[idxNo]->Fields()) { + if (f == IndexValueType::SetByJsonPath) { + // not indexed fields allowed only in ft composite indexes + assertrx_throw(ftIdx); + compositeFieldsTypes.push_back(KeyValueType::String{}); + } else { + assertrx_throw(f <= ns.indexes_.firstCompositePos()); + compositeFieldsTypes.push_back(ns.indexes_[f]->SelectKeyType()); + } + } + } + qField.SetIndexData(idxNo, FieldsSet(idx.Fields()), idx.KeyType(), idx.SelectKeyType(), std::move(compositeFieldsTypes)); +} + +void QueryPreprocessor::SetQueryField(QueryField &qField, const NamespaceImpl &ns) { + int idxNo = IndexValueType::SetByJsonPath; + if (ns.getIndexByNameOrJsonPath(qField.FieldName(), idxNo)) { + setQueryIndex(qField, idxNo, ns); + } else { + qField.SetField({ns.tagsMatcher_.path2tag(qField.FieldName())}); + } +} + } // namespace reindexer diff --git a/cpp_src/core/nsselecter/querypreprocessor.h b/cpp_src/core/nsselecter/querypreprocessor.h index 907636251..ba79d4c10 100644 --- a/cpp_src/core/nsselecter/querypreprocessor.h +++ b/cpp_src/core/nsselecter/querypreprocessor.h @@ -65,6 +65,7 @@ class QueryPreprocessor : private QueryEntries { return std::move(*ftPreselect_); } bool IsFtPreselected() const noexcept { return ftPreselect_ && !ftEntry_; } + static void SetQueryField(QueryField &, const NamespaceImpl &); private: struct FoundIndexInfo { @@ -78,6 +79,7 @@ class QueryPreprocessor : private QueryEntries { uint64_t isFitForSortOptimization : 1; }; + static void setQueryIndex(QueryField &, int idxNo, const NamespaceImpl &); [[nodiscard]] SortingEntries detectOptimalSortOrder() const; bool forcedStage() const noexcept { return evaluationsCount_ == (desc_ ? 
1 : 0); } size_t lookupQueryIndexes(uint16_t dst, uint16_t srcBegin, uint16_t srcEnd); @@ -85,7 +87,7 @@ class QueryPreprocessor : private QueryEntries { bool mergeQueryEntries(size_t lhs, size_t rhs); const std::vector *getCompositeIndex(int field) const; void convertWhereValues(QueryEntries::iterator begin, QueryEntries::iterator end) const; - void convertWhereValues(QueryEntry *) const; + void convertWhereValues(QueryEntry &) const; [[nodiscard]] const Index *findMaxIndex(QueryEntries::const_iterator begin, QueryEntries::const_iterator end) const; void findMaxIndex(QueryEntries::const_iterator begin, QueryEntries::const_iterator end, h_vector &foundIndexes) const; @@ -94,12 +96,13 @@ class QueryPreprocessor : private QueryEntries { */ template size_t injectConditionsFromJoins(size_t from, size_t to, JoinedSelectors &, OnConditionInjections &, const RdxContext &); - void fillQueryEntryFromOnCondition(QueryEntry &, std::string &outExplainStr, AggType &, NamespaceImpl &rightNs, Query joinQuery, - std::string joinIndex, CondType condition, KeyValueType, const RdxContext &); - template - void fillQueryEntryFromOnCondition(QueryEntry &, std::string_view joinIndex, CondType condition, const JoinedSelector &, KeyValueType, - int rightIdxNo, const CollateOpts &); - void checkStrictMode(const std::string &index, int idxNo) const; + [[nodiscard]] std::pair queryValuesFromOnCondition(std::string &outExplainStr, AggType &, + NamespaceImpl &rightNs, Query joinQuery, + const QueryJoinEntry &, CondType condition, + const RdxContext &); + [[nodiscard]] std::pair queryValuesFromOnCondition(CondType condition, const QueryJoinEntry &, + const JoinedSelector &, const CollateOpts &); + void checkStrictMode(const QueryField &) const; bool removeBrackets(); size_t removeBrackets(size_t begin, size_t end); bool canRemoveBracket(size_t i) const; diff --git a/cpp_src/core/nsselecter/selectiteratorcontainer.cc b/cpp_src/core/nsselecter/selectiteratorcontainer.cc index a926d6fb9..f34f73137 100644 --- a/cpp_src/core/nsselecter/selectiteratorcontainer.cc +++ b/cpp_src/core/nsselecter/selectiteratorcontainer.cc @@ -188,46 +188,15 @@ void SelectIteratorContainer::SetExpectMaxIterations(int expectedIterations) { SelectKeyResults SelectIteratorContainer::processQueryEntry(const QueryEntry &qe, const NamespaceImpl &ns, StrictMode strictMode) { SelectKeyResults selectResults; - FieldsSet fields; - TagsPath tagsPath = ns.tagsMatcher_.path2tag(qe.index); - - // TODO: it may be necessary to remove or change this switch after QueryEntry refactoring - switch (qe.condition) { - case CondAny: - case CondEmpty: - case CondAllSet: - case CondEq: - case CondSet: - break; - case CondRange: - case CondDWithin: - if (qe.values.size() != 2) { - throw Error(errParams, "For condition %s required exactly 2 arguments, but provided %d", CondTypeToStr(qe.condition), - qe.values.size()); - } - break; - case CondLt: - case CondLe: - case CondGt: - case CondGe: - case CondLike: - if (qe.values.size() != 1) { - throw Error(errParams, "For condition %s required exactly 1 argument, but provided %d", CondTypeToStr(qe.condition), - qe.values.size()); - } - break; - } - - if (!tagsPath.empty()) { + if (!qe.HaveEmptyField()) { SelectKeyResult comparisonResult; - fields.push_back(tagsPath); - comparisonResult.comparators_.emplace_back(qe.condition, KeyValueType::Null{}, qe.values, false, qe.distinct, ns.payloadType_, - fields, nullptr, CollateOpts()); + comparisonResult.comparators_.emplace_back(qe.Condition(), KeyValueType::Null{}, qe.Values(), false, 
qe.Distinct(), ns.payloadType_, + qe.Fields(), nullptr, CollateOpts()); selectResults.emplace_back(std::move(comparisonResult)); } else if (strictMode == StrictModeNone) { SelectKeyResult res; // Ignore non-index/non-existing fields - if (qe.condition == CondEmpty) { + if (qe.Condition() == CondEmpty) { res.emplace_back(SingleSelectKeyResult(IdType(0), IdType(ns.items_.size()))); } else { res.emplace_back(SingleSelectKeyResult(IdType(0), IdType(0))); @@ -237,33 +206,30 @@ SelectKeyResults SelectIteratorContainer::processQueryEntry(const QueryEntry &qe throw Error( errParams, "Current query strict mode allows filtering by existing fields only. There are no fields with name '%s' in namespace '%s'", - qe.index, ns.name_); + qe.FieldName(), ns.name_); } return selectResults; } template -void SelectIteratorContainer::processField(FieldsComparator &fc, std::string_view field, int idxNo, const NamespaceImpl &ns) const { - const bool nonIndexField = (idxNo == IndexValueType::SetByJsonPath); - if (nonIndexField) { - TagsPath tagsPath = ns.tagsMatcher_.path2tag(field); - if (tagsPath.empty()) { - throw Error{errQueryExec, "Only existing fields can be compared. There are no fields with name '%s' in namespace '%s'", field, - ns.name_}; - } +void SelectIteratorContainer::processField(FieldsComparator &fc, const QueryField &field, const NamespaceImpl &ns) const { + if (field.IsFieldIndexed()) { + auto &index = ns.indexes_[field.IndexNo()]; if constexpr (left) { - fc.SetLeftField(tagsPath); + fc.SetCollateOpts(index->Opts().collateOpts_); + fc.SetLeftField(field.Fields(), field.FieldType(), index->Opts().IsArray()); } else { - fc.SetRightField(tagsPath); + fc.SetRightField(field.Fields(), field.FieldType(), index->Opts().IsArray()); } + } else if (field.HaveEmptyField()) { + throw Error{errQueryExec, "Only existing fields can be compared. There are no fields with name '%s' in namespace '%s'", + field.FieldName(), ns.name_}; } else { - auto &index = ns.indexes_[idxNo]; if constexpr (left) { - fc.SetCollateOpts(index->Opts().collateOpts_); - fc.SetLeftField(index->Fields(), index->KeyType(), index->Opts().IsArray()); + fc.SetLeftField(field.Fields()); } else { - fc.SetRightField(index->Fields(), index->KeyType(), index->Opts().IsArray()); + fc.SetRightField(field.Fields()); } } } @@ -272,7 +238,7 @@ SelectKeyResults SelectIteratorContainer::processQueryEntry(const QueryEntry &qe unsigned sortId, bool isQueryFt, SelectFunction::Ptr &selectFnc, bool &isIndexFt, bool &isIndexSparse, FtCtx::Ptr &ftCtx, QueryPreprocessor &qPreproc, const RdxContext &rdxCtx) { - auto &index = ns.indexes_[qe.idxNo]; + auto &index = ns.indexes_[qe.IndexNo()]; isIndexFt = IsFullText(index->Type()); isIndexSparse = index->Opts().IsSparse(); @@ -289,31 +255,31 @@ SelectKeyResults SelectIteratorContainer::processQueryEntry(const QueryEntry &qe opts.forceComparator = 1; } } - if (qe.distinct) { + if (qe.Distinct()) { opts.distinct = 1; } opts.maxIterations = GetMaxIterations(); opts.indexesNotOptimized = !ctx_->sortingContext.enableSortOrders; opts.inTransaction = ctx_->inTransaction; - auto ctx = selectFnc ? selectFnc->CreateCtx(qe.idxNo) : BaseFunctionCtx::Ptr{}; + auto ctx = selectFnc ? 
selectFnc->CreateCtx(qe.IndexNo()) : BaseFunctionCtx::Ptr{}; if (ctx && ctx->type == BaseFunctionCtx::kFtCtx) ftCtx = reindexer::reinterpret_pointer_cast(ctx); if (index->Opts().GetCollateMode() == CollateUTF8 || isIndexFt) { - for (auto &key : qe.values) key.EnsureUTF8(); + for (auto &key : qe.Values()) key.EnsureUTF8(); } PerfStatCalculatorMT calc(index->GetSelectPerfCounter(), ns.enablePerfCounters_); if (qPreproc.IsFtPreselected()) { - return index->SelectKey(qe.values, qe.condition, opts, ctx, qPreproc.MoveFtPreselect(), rdxCtx); + return index->SelectKey(qe.Values(), qe.Condition(), opts, ctx, qPreproc.MoveFtPreselect(), rdxCtx); } else { - return index->SelectKey(qe.values, qe.condition, sortId, opts, ctx, rdxCtx); + return index->SelectKey(qe.Values(), qe.Condition(), sortId, opts, ctx, rdxCtx); } } void SelectIteratorContainer::processJoinEntry(const JoinQueryEntry &jqe, OpType op) { auto &js = (*ctx_->joinedSelectors)[jqe.joinIndex]; if (js.JoinQuery().joinEntries_.empty()) throw Error(errQueryExec, "Join without ON conditions"); - if (js.JoinQuery().joinEntries_[0].op_ == OpOr) throw Error(errQueryExec, "The first ON condition cannot have OR operation"); + if (js.JoinQuery().joinEntries_[0].Operation() == OpOr) throw Error(errQueryExec, "The first ON condition cannot have OR operation"); if (js.Type() != InnerJoin && js.Type() != OrInnerJoin) throw Error(errLogic, "Not INNER JOIN in QueryEntry"); if (js.Type() == OrInnerJoin) { if (op == OpNot) throw Error(errQueryExec, "NOT operator with or_inner_join"); @@ -327,7 +293,7 @@ void SelectIteratorContainer::processJoinEntry(const JoinQueryEntry &jqe, OpType } void SelectIteratorContainer::processQueryEntryResults(SelectKeyResults &selectResults, OpType op, const NamespaceImpl &ns, - const QueryEntry &qe, bool isIndexFt, bool isIndexSparse, bool nonIndexField, + const QueryEntry &qe, bool isIndexFt, bool isIndexSparse, std::optional nextOp) { if (selectResults.empty()) { if (op == OpAnd) { @@ -340,19 +306,19 @@ void SelectIteratorContainer::processQueryEntryResults(SelectKeyResults &selectR case OpOr: { const iterator last = lastAppendedOrClosed(); if (last == this->end()) { - throw Error(errQueryExec, "OR operator in first condition or after left join "); + throw Error(errQueryExec, "OR operator in first condition or after left join"); } if (last->HoldsOrReferTo() && !last->Value().distinct && last->operation != OpNot) { if (last->IsRef()) { last->SetValue(last->Value()); } SelectIterator &it = last->Value(); - if (nonIndexField || isIndexSparse) { + if (!qe.IsFieldIndexed() || isIndexSparse) { it.Append(res); } else { - it.AppendAndBind(res, ns.payloadType_, qe.idxNo); + it.AppendAndBind(res, ns.payloadType_, qe.IndexNo()); } - it.name += " or " + qe.index; + it.name += " or " + qe.FieldName(); break; } } @@ -360,16 +326,16 @@ void SelectIteratorContainer::processQueryEntryResults(SelectKeyResults &selectR case OpNot: case OpAnd: // Iterator Field Kind: Query entry results. Field known. - Append(op, SelectIterator(res, qe.distinct, qe.index, - qe.idxNo < 0 ? IteratorFieldKind::NonIndexed : IteratorFieldKind::Indexed, isIndexFt)); - if (!nonIndexField && !isIndexSparse) { + Append(op, res, qe.Distinct(), qe.FieldName(), + qe.IndexNo() < 0 ? 
IteratorFieldKind::NonIndexed : IteratorFieldKind::Indexed, isIndexFt); + if (qe.IsFieldIndexed() && !isIndexSparse) { // last appended is always a SelectIterator const auto lastAppendedIt = lastAppendedOrClosed(); if (lastAppendedIt->IsRef()) { lastAppendedIt->SetValue(lastAppendedIt->Value()); } SelectIterator &lastAppended = lastAppendedIt->Value(); - lastAppended.Bind(ns.payloadType_, qe.idxNo); + lastAppended.Bind(ns.payloadType_, qe.IndexNo()); lastAppended.SetNotOperationFlag(op == OpNot); const auto maxIterations = lastAppended.GetMaxIterations(); const int cur = op == OpNot ? ns.items_.size() - maxIterations : maxIterations; @@ -393,25 +359,23 @@ void SelectIteratorContainer::processEqualPositions(const std::vector(eqPos.queryEntriesPositions[0])}; - if (firstQe.condition == CondEmpty || (firstQe.condition == CondSet && firstQe.values.empty())) { + if (firstQe.Condition() == CondEmpty || (firstQe.Condition() == CondSet && firstQe.Values().empty())) { throw Error(errLogic, "Condition IN(with empty parameter list), IS NULL, IS EMPTY not allowed for equal position!"); } - const KeyValueType type = firstQe.values.size() ? firstQe.values[0].Type() : KeyValueType::Null{}; - Comparator cmp(firstQe.condition, type, firstQe.values, true, firstQe.distinct, ns.payloadType_, FieldsSet({firstQe.idxNo})); + const KeyValueType type = firstQe.Values().size() ? firstQe.Values()[0].Type() : KeyValueType::Null{}; + Comparator cmp{firstQe.Condition(), type, firstQe.Values(), true, firstQe.Distinct(), ns.payloadType_, firstQe.Fields()}; for (size_t i = 0; i < eqPos.queryEntriesPositions.size(); ++i) { const QueryEntry &qe = queries.Get(eqPos.queryEntriesPositions[i]); - if (qe.condition == CondEmpty || (qe.condition == CondSet && qe.values.empty())) { + if (qe.Condition() == CondEmpty || (qe.Condition() == CondSet && qe.Values().empty())) { throw Error(errLogic, "Condition IN(with empty parameter list), IS NULL, IS EMPTY not allowed for equal position!"); } - if (qe.idxNo == IndexValueType::SetByJsonPath) { - cmp.BindEqualPosition(ns.tagsMatcher_.path2tag(qe.index), qe.values, qe.condition); - } else if (ns.indexes_[qe.idxNo]->Opts().IsSparse()) { - const TagsPath &tp = ns.indexes_[qe.idxNo]->Fields().getTagsPath(0); - cmp.BindEqualPosition(tp, qe.values, qe.condition); + assertrx_throw(qe.Fields().size() == 1); + if (qe.Fields()[0] == IndexValueType::SetByJsonPath) { + cmp.BindEqualPosition(qe.Fields().getFieldsPath(0), qe.Values(), qe.Condition()); } else { - cmp.BindEqualPosition(qe.idxNo, qe.values, qe.condition); + cmp.BindEqualPosition(qe.Fields()[0], qe.Values(), qe.Condition()); } } @@ -450,33 +414,33 @@ std::vector SelectIteratorContainer::pr queries.InvokeAppropriate( j, Skip{}, [&](const QueryEntry &eq) { - if (foundFields.find(eq.index) != foundFields.end()) { + if (foundFields.find(eq.FieldName()) != foundFields.end()) { throw Error(errParams, "Equal position field '%s' found twice in enclosing bracket; equal position fields: [%s]", - eq.index, getEpFieldsStr()); + eq.FieldName(), getEpFieldsStr()); } - const auto it = epFields.find(eq.index); + const auto it = epFields.find(eq.FieldName()); if (it == epFields.end()) return; if (queries.GetOperation(j) != OpAnd || (next < end && queries.GetOperation(next) == OpOr)) { throw Error(errParams, "Only AND operation allowed for equal position; equal position field with not AND operation: '%s'; " "equal position fields: [%s]", - eq.index, getEpFieldsStr()); + eq.FieldName(), getEpFieldsStr()); } result[i].queryEntriesPositions.push_back(j); 
foundFields.insert(epFields.extract(it)); }, [&](const BetweenFieldsQueryEntry &eq) { // TODO equal positions for BetweenFieldsQueryEntry #1092 - if (epFields.find(eq.firstIndex) != epFields.end()) { + if (epFields.find(eq.LeftFieldName()) != epFields.end()) { throw Error( errParams, "Equal positions for conditions between fields are not supported; field: '%s'; equal position fields: [%s]", - eq.firstIndex, getEpFieldsStr()); + eq.LeftFieldName(), getEpFieldsStr()); } - if (epFields.find(eq.secondIndex) != epFields.end()) { + if (epFields.find(eq.RightFieldName()) != epFields.end()) { throw Error( errParams, "Equal positions for conditions between fields are not supported; field: '%s'; equal position fields: [%s]", - eq.secondIndex, getEpFieldsStr()); + eq.RightFieldName(), getEpFieldsStr()); } }); } @@ -517,28 +481,16 @@ bool SelectIteratorContainer::prepareIteratorsForSelectLoop(QueryPreprocessor &q return contFT; }, [&](const QueryEntry &qe) { - const bool isFT = qe.idxNo != IndexValueType::SetByJsonPath && IsFullText(ns.indexes_[qe.idxNo]->Type()); + const bool isFT = qe.IsFieldIndexed() && IsFullText(ns.indexes_[qe.IndexNo()]->Type()); if (isFT && (op == OpOr || (next < end && queries.GetOperation(next) == OpOr))) { throw Error(errLogic, "OR operation is not allowed with fulltext index"); } SelectKeyResults selectResults; bool isIndexFt = false, isIndexSparse = false; - const bool nonIndexField = (qe.idxNo == IndexValueType::SetByJsonPath); - - if (nonIndexField) { - auto strictMode = ns.config_.strictMode; - if (ctx_) { - if (ctx_->inTransaction) { - strictMode = StrictModeNone; - } else if (ctx_->query.strictMode != StrictModeNotSet) { - strictMode = ctx_->query.strictMode; - } - } - selectResults = processQueryEntry(qe, ns, strictMode); - } else { - bool enableSortIndexOptimize = (ctx_->sortingContext.uncommitedIndex == qe.idxNo) && !sortIndexFound && - (op == OpAnd) && !qe.distinct && (begin == 0) && + if (qe.IsFieldIndexed()) { + bool enableSortIndexOptimize = (ctx_->sortingContext.uncommitedIndex == qe.IndexNo()) && !sortIndexFound && + (op == OpAnd) && !qe.Distinct() && (begin == 0) && (next == end || queries.GetOperation(next) != OpOr); if (enableSortIndexOptimize) { if (!IsExpectingOrderedResults(qe)) { @@ -549,12 +501,22 @@ bool SelectIteratorContainer::prepareIteratorsForSelectLoop(QueryPreprocessor &q } selectResults = processQueryEntry(qe, enableSortIndexOptimize, ns, sortId, isQueryFt, selectFnc, isIndexFt, isIndexSparse, ftCtx, qPreproc, rdxCtx); + } else { + auto strictMode = ns.config_.strictMode; + if (ctx_) { + if (ctx_->inTransaction) { + strictMode = StrictModeNone; + } else if (ctx_->query.strictMode != StrictModeNotSet) { + strictMode = ctx_->query.strictMode; + } + } + selectResults = processQueryEntry(qe, ns, strictMode); } std::optional nextOp; if (next != end) { nextOp = queries.GetOperation(next); } - processQueryEntryResults(selectResults, op, ns, qe, isIndexFt, isIndexSparse, nonIndexField, nextOp); + processQueryEntryResults(selectResults, op, ns, qe, isIndexFt, isIndexSparse, nextOp); if (op != OpOr) { for (auto &ep : equalPositions) { const auto lastPosition = ep.queryEntriesPositions.back(); @@ -574,9 +536,9 @@ bool SelectIteratorContainer::prepareIteratorsForSelectLoop(QueryPreprocessor &q return false; }, [&](const BetweenFieldsQueryEntry &qe) { - FieldsComparator fc{qe.firstIndex, qe.Condition(), qe.secondIndex, ns.payloadType_}; - processField(fc, qe.firstIndex, qe.firstIdxNo, ns); - processField(fc, qe.secondIndex, qe.secondIdxNo, ns); + 
FieldsComparator fc{qe.LeftFieldName(), qe.Condition(), qe.RightFieldName(), ns.payloadType_}; + processField(fc, qe.LeftFieldData(), ns); + processField(fc, qe.RightFieldData(), ns); Append(op, std::move(fc)); return false; }, @@ -759,11 +721,11 @@ void JoinSelectIterator::Dump(WrSerializer &ser, const std::vector - void processField(FieldsComparator &, std::string_view field, int idxNo, const NamespaceImpl &ns) const; + void processField(FieldsComparator &, const QueryField &, const NamespaceImpl &) const; void processJoinEntry(const JoinQueryEntry &, OpType); void processQueryEntryResults(SelectKeyResults &selectResults, OpType, const NamespaceImpl &ns, const QueryEntry &qe, bool isIndexFt, - bool isIndexSparse, bool nonIndexField, std::optional nextOp); + bool isIndexSparse, std::optional nextOp); struct EqualPositions { h_vector queryEntriesPositions; size_t positionToInsertIterator = 0; diff --git a/cpp_src/core/payload/fieldsset.h b/cpp_src/core/payload/fieldsset.h index 87da7eefe..98c0062ae 100644 --- a/cpp_src/core/payload/fieldsset.h +++ b/cpp_src/core/payload/fieldsset.h @@ -75,18 +75,8 @@ class FieldsSet : protected base_fields_set { } } - void push_back(const TagsPath &tagsPath) { - if (!contains(tagsPath)) { - base_fields_set::push_back(IndexValueType::SetByJsonPath); - tagsPaths_.emplace_back(tagsPath); - } - } - void push_back(TagsPath &&tagsPath) { - if (!contains(tagsPath)) { - base_fields_set::push_back(IndexValueType::SetByJsonPath); - tagsPaths_.emplace_back(std::move(tagsPath)); - } - } + void push_back(const TagsPath &tagsPath) { pushBack(tagsPath); } + void push_back(TagsPath &&tagsPath) { pushBack(std::move(tagsPath)); } void push_front(TagsPath &&tagsPath) { if (!contains(tagsPath)) { base_fields_set::insert(begin(), IndexValueType::SetByJsonPath); @@ -94,18 +84,10 @@ class FieldsSet : protected base_fields_set { } } - void push_back(const IndexedTagsPath &tagsPath) { - if (!contains(tagsPath)) { - base_fields_set::push_back(IndexValueType::SetByJsonPath); - tagsPaths_.emplace_back(tagsPath); - } - } - void push_back(IndexedTagsPath &&tagsPath) { - if (!contains(tagsPath)) { - base_fields_set::push_back(IndexValueType::SetByJsonPath); - tagsPaths_.emplace_back(std::move(tagsPath)); - } - } + void push_back(const IndexedTagsPath &tagsPath) { pushBack(tagsPath); } + void push_back(IndexedTagsPath &&tagsPath) { pushBack(std::move(tagsPath)); } + void push_back(const FieldsPath &fieldPath) { pushBack(fieldPath); } + void push_back(FieldsPath &&fieldPath) { pushBack(std::move(fieldPath)); } void push_back(int f) { if (f < 0) return; @@ -146,31 +128,34 @@ class FieldsSet : protected base_fields_set { bool contains(const IndexesFieldsSet &f) const noexcept { return (mask_ & f.mask()) == f.mask(); } bool contains(const TagsPath &tagsPath) const noexcept { for (const FieldsPath &path : tagsPaths_) { - if (path.index() == 0) { - if (std::get(path) == tagsPath) return true; - } else { - if (std::get(path).Compare(tagsPath)) return true; + if (std::visit(overloaded{[&tagsPath](const TagsPath &path) { return path == tagsPath; }, + [&tagsPath](const IndexedTagsPath &path) { return path.Compare(tagsPath); }}, + path)) { + return true; } } return false; } bool contains(const IndexedTagsPath &tagsPath) const noexcept { for (const FieldsPath &path : tagsPaths_) { - if (path.index() == 1) { - if (std::get(path) == tagsPath) return true; - } else { - if (tagsPath.Compare(std::get(path))) return true; + if (std::visit(overloaded{[&tagsPath](const TagsPath &path) { return 
tagsPath.Compare(path); }, + [&tagsPath](const IndexedTagsPath &path) { return path == tagsPath; }}, + path)) { + return true; } } return false; } + bool contains(const FieldsPath &fieldsPath) const noexcept { + return std::visit([&](const auto &fp) { return contains(fp); }, fieldsPath); + } bool match(const TagsPath &tagsPath) const noexcept { if (tagsPaths_.empty()) return true; - for (auto &flt : tagsPaths_) { - if (flt.index() == 0) { - if (comparePaths(tagsPath, std::get(flt))) return true; - } else { - if (comparePaths(std::get(flt), tagsPath)) return true; + for (auto &path : tagsPaths_) { + if (std::visit(overloaded{[&tagsPath, this](const TagsPath &path) { return comparePaths(tagsPath, path); }, + [&tagsPath, this](const IndexedTagsPath &path) { return comparePaths(path, tagsPath); }}, + path)) { + return true; } } return false; @@ -178,11 +163,11 @@ class FieldsSet : protected base_fields_set { template bool match(const IndexedTagsPathImpl &tagsPath) const noexcept { if (tagsPaths_.empty()) return true; - for (auto &flt : tagsPaths_) { - if (flt.index() == 1) { - if (comparePaths(tagsPath, std::get(flt))) return true; - } else { - if (comparePaths(tagsPath, std::get(flt))) return true; + for (auto &path : tagsPaths_) { + if (std::visit(overloaded{[&tagsPath, this](const TagsPath &path) { return comparePaths(tagsPath, path); }, + [&tagsPath, this](const IndexedTagsPath &path) { return comparePaths(tagsPath, path); }}, + path)) { + return true; } } return false; @@ -199,12 +184,15 @@ class FieldsSet : protected base_fields_set { const h_vector &getJsonPaths() const noexcept { return jsonPaths_; } bool isTagsPathIndexed(size_t idx) const noexcept { assertrx(idx < tagsPaths_.size()); - return (tagsPaths_[idx].index() == 1); + return std::visit(overloaded{[](const TagsPath &) { return false; }, [](const IndexedTagsPath &) { return true; }}, + tagsPaths_[idx]); } const TagsPath &getTagsPath(size_t idx) const & { return std::get(tagsPaths_[idx]); } const TagsPath &getTagsPath(size_t idx) const && = delete; const IndexedTagsPath &getIndexedTagsPath(size_t idx) const & { return std::get(tagsPaths_[idx]); } const IndexedTagsPath &getIndexedTagsPath(size_t idx) const && = delete; + const FieldsPath &getFieldsPath(size_t idx) const & { return tagsPaths_[idx]; } + const FieldsPath &getFieldsPath(size_t idx) const && = delete; const std::string &getJsonPath(size_t idx) const &noexcept { return jsonPaths_[idx]; } const std::string &getJsonPath(size_t idx) const && = delete; @@ -235,7 +223,14 @@ class FieldsSet : protected base_fields_set { os << "]}"; } -protected: +private: + template + void pushBack(F &&fieldPath) { + if (!contains(fieldPath)) { + base_fields_set::push_back(IndexValueType::SetByJsonPath); + tagsPaths_.emplace_back(std::forward(fieldPath)); + } + } template bool comparePaths(const TPath1 &lhs, const TPath2 &rhs) const noexcept { unsigned i = 0, count = std::min(lhs.size(), rhs.size()); diff --git a/cpp_src/core/payload/payloadiface.cc b/cpp_src/core/payload/payloadiface.cc index 110d0b80f..64b50daec 100644 --- a/cpp_src/core/payload/payloadiface.cc +++ b/cpp_src/core/payload/payloadiface.cc @@ -5,7 +5,6 @@ #include "core/keyvalue/p_string.h" #include "core/keyvalue/variant.h" #include "core/namespace/stringsholder.h" -#include "itoa/itoa.h" #include "payloadiface.h" #include "payloadvalue.h" @@ -77,29 +76,77 @@ void PayloadIface::GetByJsonPath(std::string_view jsonPath, TagsMatcher &tags } template -void PayloadIface::GetByJsonPath(const TagsPath &jsonPath, VariantArray 
&krefs, KeyValueType expectedType) const { - ConstPayload pl(t_, *v_); - FieldsSet filter({jsonPath}); - BaseEncoder encoder(nullptr, &filter); +template +void PayloadIface::getByJsonPath(const P &path, VariantArray &krefs, KeyValueType expectedType) const { krefs.clear(); - if (!jsonPath.empty()) { - FieldsExtractor extractor(&krefs, expectedType, jsonPath.size()); - encoder.Encode(pl, extractor); + if (path.empty()) { + return; } + const FieldsSet filter{{path}}; + ConstPayload pl(t_, *v_); + BaseEncoder encoder(nullptr, &filter); + FieldsExtractor extractor(&krefs, expectedType, path.size(), &filter); + encoder.Encode(pl, extractor); +} + +template +void PayloadIface::GetByJsonPath(const TagsPath &tagsPath, VariantArray &krefs, KeyValueType expectedType) const { + getByJsonPath(tagsPath, krefs, expectedType); } template void PayloadIface::GetByJsonPath(const IndexedTagsPath &tagsPath, VariantArray &krefs, KeyValueType expectedType) const { - ConstPayload pl(t_, *v_); - FieldsSet filter({tagsPath}); - BaseEncoder encoder(nullptr, &filter); - krefs.Clear(); - if (!tagsPath.empty()) { - FieldsExtractor extractor(&krefs, expectedType, tagsPath.size(), &filter); - encoder.Encode(pl, extractor); + getByJsonPath(tagsPath, krefs, expectedType); +} + +template +void PayloadIface::GetByFieldsSet(const FieldsSet &fields, VariantArray &kvs, KeyValueType expectedType, + const std::vector &expectedCompositeTypes) const { + if (expectedType.Is()) { + kvs.Clear(); + kvs.emplace_back(GetComposite(fields, expectedCompositeTypes)); + } else { + assertrx_throw(fields.size() == 1); + if (fields[0] == IndexValueType::SetByJsonPath) { + assertrx_throw(fields.getTagsPathsLength() == 1); + if (fields.isTagsPathIndexed(0)) { + getByJsonPath(fields.getIndexedTagsPath(0), kvs, expectedType); + } else { + getByJsonPath(fields.getTagsPath(0), kvs, expectedType); + } + } else { + Get(fields[0], kvs); + } } } +template +Variant PayloadIface::GetComposite(const FieldsSet &fields, const std::vector &expectedTypes) const { + thread_local VariantArray buffer; + buffer.clear(); + assertrx_throw(fields.size() == expectedTypes.size()); + size_t jsonFieldIdx{0}; + [[maybe_unused]] const size_t maxJsonFieldIdx{fields.getTagsPathsLength()}; + VariantArray buf; + for (size_t i = 0, s = fields.size(); i < s; ++i) { + buf.clear(); + if (fields[i] == IndexValueType::SetByJsonPath) { + assertrx_throw(jsonFieldIdx < maxJsonFieldIdx); + if (fields.isTagsPathIndexed(jsonFieldIdx)) { + getByJsonPath(fields.getIndexedTagsPath(jsonFieldIdx), buf, expectedTypes[i]); + } else { + getByJsonPath(fields.getTagsPath(jsonFieldIdx), buf, expectedTypes[i]); + } + ++jsonFieldIdx; + } else { + Get(fields[i], buf); + } + assertrx_throw(buf.size() == 1); + buffer.emplace_back(std::move(buf[0])); + } + return Variant{buffer}; +} + template VariantArray PayloadIface::GetIndexedArrayData(const IndexedTagsPath &tagsPath, int field, int &offset, int &size) const { if (tagsPath.empty()) { @@ -199,8 +246,13 @@ void PayloadIface::SerializeFields(WrSerializer &ser, const FieldsSet &fields for (int field : fields) { if (field == IndexValueType::SetByJsonPath) { assertrx(tagPathIdx < fields.getTagsPathsLength()); - const TagsPath &tagsPath = fields.getTagsPath(tagPathIdx); - GetByJsonPath(tagsPath, varr, KeyValueType::Undefined{}); + if (fields.isTagsPathIndexed(tagPathIdx)) { + const IndexedTagsPath &tagsPath = fields.getIndexedTagsPath(tagPathIdx); + GetByJsonPath(tagsPath, varr, KeyValueType::Undefined{}); + } else { + const TagsPath &tagsPath = 
fields.getTagsPath(tagPathIdx); + GetByJsonPath(tagsPath, varr, KeyValueType::Undefined{}); + } if (varr.empty()) { throw Error(errParams, "PK serializing error: field [%s] cannot not be empty", fields.getJsonPath(tagPathIdx)); } @@ -281,8 +333,13 @@ size_t PayloadIface::GetHash(const FieldsSet &fields) const { ret ^= Field(field).Hash(); } else { assertrx(tagPathIdx < fields.getTagsPathsLength()); - const TagsPath &tagsPath = fields.getTagsPath(tagPathIdx++); - GetByJsonPath(tagsPath, keys1, KeyValueType::Undefined{}); + if (fields.isTagsPathIndexed(tagPathIdx)) { + const IndexedTagsPath &tagsPath = fields.getIndexedTagsPath(tagPathIdx++); + GetByJsonPath(tagsPath, keys1, KeyValueType::Undefined{}); + } else { + const TagsPath &tagsPath = fields.getTagsPath(tagPathIdx++); + GetByJsonPath(tagsPath, keys1, KeyValueType::Undefined{}); + } ret ^= keys1.Hash(); } } @@ -333,9 +390,15 @@ bool PayloadIface::IsEQ(const T &other, const FieldsSet &fields) const { if (!Field(field).IsEQ(o.Field(field))) return false; } } else { - const TagsPath &tagsPath = fields.getTagsPath(tagPathIdx++); - GetByJsonPath(tagsPath, keys1, KeyValueType::Undefined{}); - o.GetByJsonPath(tagsPath, keys2, KeyValueType::Undefined{}); + if (fields.isTagsPathIndexed(tagPathIdx)) { + const IndexedTagsPath &tagsPath = fields.getIndexedTagsPath(tagPathIdx++); + GetByJsonPath(tagsPath, keys1, KeyValueType::Undefined{}); + o.GetByJsonPath(tagsPath, keys2, KeyValueType::Undefined{}); + } else { + const TagsPath &tagsPath = fields.getTagsPath(tagPathIdx++); + GetByJsonPath(tagsPath, keys1, KeyValueType::Undefined{}); + o.GetByJsonPath(tagsPath, keys2, KeyValueType::Undefined{}); + } if (keys1 != keys2) { return false; } @@ -355,35 +418,44 @@ int PayloadIface::Compare(const T &other, const FieldsSet &fields, size_t &fi bool commonOpts = (collateOpts.size() == 1); for (size_t i = 0; i < fields.size(); ++i) { - int cmpRes = 0; const auto field(fields[i]); const CollateOpts *opts(commonOpts ? collateOpts[0] : collateOpts[i]); if (field != IndexValueType::SetByJsonPath) { - cmpRes = Field(field).Get().Compare(o.Field(field).Get(), opts ? *opts : CollateOpts()); + int cmpRes = Field(field).Get().Compare(o.Field(field).Get(), opts ? *opts : CollateOpts()); + if (cmpRes) { + firstDifferentFieldIdx = i; + return cmpRes; + } } else { assertrx(tagPathIdx < fields.getTagsPathsLength()); - const TagsPath &tagsPath = fields.getTagsPath(tagPathIdx++); - GetByJsonPath(tagsPath, krefs1, KeyValueType::Undefined{}); - o.GetByJsonPath(tagsPath, krefs2, KeyValueType::Undefined{}); + if (fields.isTagsPathIndexed(tagPathIdx)) { + const IndexedTagsPath &tagsPath = fields.getIndexedTagsPath(tagPathIdx++); + GetByJsonPath(tagsPath, krefs1, KeyValueType::Undefined{}); + o.GetByJsonPath(tagsPath, krefs2, KeyValueType::Undefined{}); + } else { + const TagsPath &tagsPath = fields.getTagsPath(tagPathIdx++); + GetByJsonPath(tagsPath, krefs1, KeyValueType::Undefined{}); + o.GetByJsonPath(tagsPath, krefs2, KeyValueType::Undefined{}); + } size_t length = std::min(krefs1.size(), krefs2.size()); for (size_t j = 0; j < length; ++j) { - cmpRes = krefs1[j].RelaxCompare(krefs2[j], opts ? *opts : CollateOpts()); - if (cmpRes) break; - } - if (cmpRes == 0) { - if (krefs1.size() < krefs2.size()) { - cmpRes = -1; - } else if (krefs1.size() > krefs2.size()) { - cmpRes = 1; + int cmpRes = krefs1[j].RelaxCompare(krefs2[j], opts ? 
*opts : CollateOpts()); + if (cmpRes) { + firstDifferentFieldIdx = i; + return cmpRes; } } - } - - firstDifferentFieldIdx = i; - if (cmpRes > 0) return 1; - if (cmpRes < 0) return -1; + if (krefs1.size() < krefs2.size()) { + firstDifferentFieldIdx = i; + return -1; + } + if (krefs1.size() > krefs2.size()) { + firstDifferentFieldIdx = i; + return 1; + } + } } return 0; } diff --git a/cpp_src/core/payload/payloadiface.h b/cpp_src/core/payload/payloadiface.h index 39569e4c3..1781e1ce9 100644 --- a/cpp_src/core/payload/payloadiface.h +++ b/cpp_src/core/payload/payloadiface.h @@ -112,6 +112,9 @@ class PayloadIface { void GetByJsonPath(std::string_view jsonPath, TagsMatcher &tagsMatcher, VariantArray &, KeyValueType expectedType) const; void GetByJsonPath(const TagsPath &jsonPath, VariantArray &, KeyValueType expectedType) const; void GetByJsonPath(const IndexedTagsPath &jsonPath, VariantArray &, KeyValueType expectedType) const; + void GetByFieldsSet(const FieldsSet &, VariantArray &, KeyValueType expectedType, + const std::vector &expectedCompositeTypes) const; + [[nodiscard]] Variant GetComposite(const FieldsSet &, const std::vector &expectedTypes) const; VariantArray GetIndexedArrayData(const IndexedTagsPath &jsonPath, int field, int &offset, int &size) const; // Get fields count @@ -167,6 +170,8 @@ class PayloadIface { T CopyWithRemovedFields(PayloadType t); template void copyOrMoveStrings(int field, StrHolder &dest, bool copy); + template + void getByJsonPath(const P &path, VariantArray &, KeyValueType expectedType) const; template ::value>::type * = nullptr> void setArray(int field, const VariantArray &keys, bool append); diff --git a/cpp_src/core/payload/payloadtype.h b/cpp_src/core/payload/payloadtype.h index b6cb5a240..96e92f40d 100644 --- a/cpp_src/core/payload/payloadtype.h +++ b/cpp_src/core/payload/payloadtype.h @@ -18,7 +18,7 @@ class PayloadType : public shared_cow_ptr { PayloadType &operator=(PayloadType &&) = default; PayloadType &operator=(const PayloadType &) = default; PayloadType(const std::string &name, std::initializer_list fields = {}); - PayloadType(const PayloadTypeImpl &impl); + explicit PayloadType(const PayloadTypeImpl &impl); ~PayloadType(); const PayloadFieldType &Field(int field) const; diff --git a/cpp_src/core/payload/payloadvalue.cc b/cpp_src/core/payload/payloadvalue.cc index 7e438b0cd..d1b07a365 100644 --- a/cpp_src/core/payload/payloadvalue.cc +++ b/cpp_src/core/payload/payloadvalue.cc @@ -1,27 +1,19 @@ #include "payloadvalue.h" -#include +#include #include "core/keyvalue/p_string.h" -#include "string.h" -#include "tools/errors.h" + namespace reindexer { PayloadValue::PayloadValue(size_t size, const uint8_t *ptr, size_t cap) : p_(nullptr) { p_ = alloc((cap != 0) ? 
cap : size); - if (ptr) + if (ptr) { memcpy(Ptr(), ptr, size); - else + } else { memset(Ptr(), 0, size); -} - -PayloadValue::PayloadValue(const PayloadValue &other) noexcept : p_(other.p_) { - if (p_) { - header()->refcount.fetch_add(1, std::memory_order_relaxed); } } -PayloadValue::~PayloadValue() { release(); } - uint8_t *PayloadValue::alloc(size_t cap) { auto pn = reinterpret_cast(operator new(cap + sizeof(dataHeader))); dataHeader *nheader = reinterpret_cast(pn); @@ -44,7 +36,7 @@ void PayloadValue::release() noexcept { void PayloadValue::Clone(size_t size) { // If we have exclusive data - just up lsn - if (p_ && header()->refcount.load() == 1) { + if (p_ && header()->refcount.load(std::memory_order_acquire) == 1) { return; } assertrx(size || p_); @@ -64,7 +56,7 @@ void PayloadValue::Clone(size_t size) { void PayloadValue::Resize(size_t oldSize, size_t newSize) { assertrx(p_); - assertrx(header()->refcount.load() == 1); + assertrx(header()->refcount.load(std::memory_order_acquire) == 1); if (newSize <= header()->cap) return; diff --git a/cpp_src/core/payload/payloadvalue.h b/cpp_src/core/payload/payloadvalue.h index 96a53b0b1..b94ba1198 100644 --- a/cpp_src/core/payload/payloadvalue.h +++ b/cpp_src/core/payload/payloadvalue.h @@ -14,18 +14,22 @@ class PayloadValue { struct dataHeader { dataHeader() noexcept : refcount(1), cap(0), lsn(-1) {} - ~dataHeader() { assertrx(refcount.load() == 0); } + ~dataHeader() { assertrx(refcount.load(std::memory_order_acquire) == 0); } refcounter refcount; unsigned cap; int64_t lsn; }; PayloadValue() noexcept : p_(nullptr) {} - PayloadValue(const PayloadValue &) noexcept; + PayloadValue(const PayloadValue &other) noexcept : p_(other.p_) { + if (p_) { + header()->refcount.fetch_add(1, std::memory_order_relaxed); + } + } // Alloc payload store with size, and copy data from another array PayloadValue(size_t size, const uint8_t *ptr = nullptr, size_t cap = 0); - ~PayloadValue(); - PayloadValue &operator=(const PayloadValue &other) { + ~PayloadValue() { release(); } + PayloadValue &operator=(const PayloadValue &other) noexcept { if (&other != this) { release(); p_ = other.p_; @@ -52,8 +56,8 @@ class PayloadValue { uint8_t *Ptr() const noexcept { return p_ + sizeof(dataHeader); } void SetLSN(int64_t lsn) { header()->lsn = lsn; } int64_t GetLSN() const { return p_ ? 
header()->lsn : 0; } - bool IsFree() const { return bool(p_ == nullptr); } - void Free() { release(); } + bool IsFree() const noexcept { return bool(p_ == nullptr); } + void Free() noexcept { release(); } size_t GetCapacity() const noexcept { return header()->cap; } const uint8_t *get() const noexcept { return p_; } diff --git a/cpp_src/core/query/dsl/dslencoder.cc b/cpp_src/core/query/dsl/dslencoder.cc index 3561d5b24..b8e92f6e3 100644 --- a/cpp_src/core/query/dsl/dslencoder.cc +++ b/cpp_src/core/query/dsl/dslencoder.cc @@ -114,10 +114,10 @@ void encodeAggregationFunctions(const Query& query, JsonBuilder& builder) { } void encodeJoinEntry(const QueryJoinEntry& joinEntry, JsonBuilder& builder) { - builder.Put("left_field", joinEntry.index_); - builder.Put("right_field", joinEntry.joinIndex_); - builder.Put("cond", get(cond_map, joinEntry.condition_)); - builder.Put("op", get(op_map, joinEntry.op_)); + builder.Put("left_field", joinEntry.LeftFieldName()); + builder.Put("right_field", joinEntry.RightFieldName()); + builder.Put("cond", get(cond_map, joinEntry.Condition())); + builder.Put("op", get(op_map, joinEntry.Operation())); } void encodeSingleJoinQuery(const JoinedQuery& joinQuery, JsonBuilder& builder) { @@ -125,9 +125,9 @@ void encodeSingleJoinQuery(const JoinedQuery& joinQuery, JsonBuilder& builder) { auto node = builder.Object("join_query"sv); node.Put("type", get(join_types, joinQuery.joinType)); - node.Put("namespace", joinQuery._namespace); - node.Put("limit", joinQuery.count); - node.Put("offset", joinQuery.start); + node.Put("namespace", joinQuery.NsName()); + node.Put("limit", joinQuery.Limit()); + node.Put("offset", joinQuery.Offset()); encodeFilters(joinQuery, node); encodeSorting(joinQuery.sortingEntries_, node); @@ -147,18 +147,18 @@ void encodeSingleJoinQuery(const JoinedQuery& joinQuery, JsonBuilder& builder) { } void encodeFilter(const QueryEntry& qentry, JsonBuilder& builder) { - if (qentry.distinct) return; - builder.Put("cond", get(cond_map, CondType(qentry.condition))); - builder.Put("field", qentry.index); + if (qentry.Distinct()) return; + builder.Put("cond", get(cond_map, CondType(qentry.Condition()))); + builder.Put("field", qentry.FieldName()); - if (qentry.values.empty()) return; - if (qentry.values.size() > 1 || qentry.values[0].Type().Is()) { + if (qentry.Values().empty()) return; + if (qentry.Values().size() > 1 || qentry.Values()[0].Type().Is()) { auto arrNode = builder.Array("value"); - for (const Variant& kv : qentry.values) { + for (const Variant& kv : qentry.Values()) { arrNode.Put(nullptr, kv); } } else { - builder.Put("value", qentry.values[0]); + builder.Put("value", qentry.Values()[0]); } } @@ -201,10 +201,10 @@ void encodeUpdateFields(const Query& query, JsonBuilder& builder) { void toDsl(const Query& query, JsonBuilder& builder) { switch (query.Type()) { case QueryType::QuerySelect: { - builder.Put("namespace", query._namespace); - builder.Put("limit", query.count); - builder.Put("offset", query.start); - builder.Put("req_total", get(reqtotal_values, query.calcTotal)); + builder.Put("namespace", query.NsName()); + builder.Put("limit", query.Limit()); + builder.Put("offset", query.Offset()); + builder.Put("req_total", get(reqtotal_values, query.CalcTotal())); builder.Put("explain", query.explain_); builder.Put("type", "select"); auto strictMode = strictModeToString(query.strictMode); @@ -222,7 +222,7 @@ void toDsl(const Query& query, JsonBuilder& builder) { break; } case QueryType::QueryUpdate: { - builder.Put("namespace", query._namespace); + 
builder.Put("namespace", query.NsName()); builder.Put("explain", query.explain_); builder.Put("type", "update"); encodeFilters(query, builder); @@ -244,14 +244,14 @@ void toDsl(const Query& query, JsonBuilder& builder) { break; } case QueryType::QueryDelete: { - builder.Put("namespace", query._namespace); + builder.Put("namespace", query.NsName()); builder.Put("explain", query.explain_); builder.Put("type", "delete"); encodeFilters(query, builder); break; } case QueryType::QueryTruncate: { - builder.Put("namespace", query._namespace); + builder.Put("namespace", query.NsName()); builder.Put("type", "truncate"); break; } @@ -283,7 +283,7 @@ void QueryEntries::toDsl(const_iterator it, const_iterator to, const Query& pare dsl::encodeEqualPositions(bracket.equalPositions, arrNode); }, [&node](const QueryEntry& qe) { - if (qe.distinct) return; + if (qe.Distinct()) return; dsl::encodeFilter(qe, node); }, [&node, &parentQuery](const JoinQueryEntry& jqe) { @@ -292,8 +292,8 @@ void QueryEntries::toDsl(const_iterator it, const_iterator to, const Query& pare }, [&node](const BetweenFieldsQueryEntry& qe) { node.Put("cond", dsl::get(dsl::cond_map, CondType(qe.Condition()))); - node.Put("first_field", qe.firstIndex); - node.Put("second_field", qe.secondIndex); + node.Put("first_field", qe.LeftFieldName()); + node.Put("second_field", qe.RightFieldName()); }); } } diff --git a/cpp_src/core/query/dsl/dslparser.cc b/cpp_src/core/query/dsl/dslparser.cc index f5fbfc709..13bac0c61 100644 --- a/cpp_src/core/query/dsl/dslparser.cc +++ b/cpp_src/core/query/dsl/dslparser.cc @@ -1,6 +1,5 @@ #include "dslparser.h" #include "core/cjson/jschemachecker.h" -#include "core/cjson/jsonbuilder.h" #include "core/query/query.h" #include "estl/fast_hash_map.h" #include "gason/gason.h" @@ -324,34 +323,6 @@ static void parseFilter(const JsonValue& filter, Query& q, std::vector(joined_entry_map, name, "join_query.on"sv)) { case JoinEntry::LeftField: checkJsonValueType(value, name, JSON_STRING); - qjoinEntry.index_ = std::string(value.toString()); + leftField = std::string(value.toString()); break; case JoinEntry::RightField: checkJsonValueType(value, name, JSON_STRING); - qjoinEntry.joinIndex_ = std::string(value.toString()); + rightField = std::string(value.toString()); break; case JoinEntry::Cond: checkJsonValueType(value, name, JSON_STRING); - qjoinEntry.condition_ = get(cond_map, value.toString(), "condition enum"sv); + cond = get(cond_map, value.toString(), "condition enum"sv); break; case JoinEntry::Op: checkJsonValueType(value, name, JSON_STRING); - qjoinEntry.op_ = get(op_map, value.toString(), "operation enum"sv); + op = get(op_map, value.toString(), "operation enum"sv); break; } } - qjoin.joinEntries_.emplace_back(qjoinEntry); + qjoin.joinEntries_.emplace_back(op, cond, std::move(leftField), std::move(rightField)); } } @@ -419,7 +392,7 @@ void parseSingleJoinQuery(const JsonValue& join, Query& query) { break; case JoinRoot::Namespace: checkJsonValueType(value, name, JSON_STRING); - qjoin._namespace = std::string(value.toString()); + qjoin.SetNsName(value.toString()); break; case JoinRoot::Filters: checkJsonValueType(value, name, JSON_ARRAY); @@ -430,11 +403,11 @@ void parseSingleJoinQuery(const JsonValue& join, Query& query) { break; case JoinRoot::Limit: checkJsonValueType(value, name, JSON_NUMBER, JSON_DOUBLE); - qjoin.count = static_cast(value.toNumber()); + qjoin.Limit(static_cast(value.toNumber())); break; case JoinRoot::Offset: checkJsonValueType(value, name, JSON_NUMBER, JSON_DOUBLE); - qjoin.start = 
static_cast(value.toNumber()); + qjoin.Offset(static_cast(value.toNumber())); break; case JoinRoot::On: parseJoinedEntries(value, qjoin); @@ -604,17 +577,17 @@ void parse(const JsonValue& root, Query& q) { switch (get(root_map, name, "root"sv)) { case Root::Namespace: checkJsonValueType(v, name, JSON_STRING); - q._namespace = std::string(v.toString()); + q.SetNsName(v.toString()); break; case Root::Limit: checkJsonValueType(v, name, JSON_NUMBER, JSON_DOUBLE); - q.count = static_cast(v.toNumber()); + q.Limit(static_cast(v.toNumber())); break; case Root::Offset: checkJsonValueType(v, name, JSON_NUMBER, JSON_DOUBLE); - q.start = static_cast(v.toNumber()); + q.Offset(static_cast(v.toNumber())); break; case Root::Filters: @@ -643,7 +616,7 @@ void parse(const JsonValue& root, Query& q) { break; case Root::ReqTotal: checkJsonValueType(v, name, JSON_STRING); - q.calcTotal = get(reqtotal_values, v.toString(), "req_total enum"sv); + q.CalcTotal(get(reqtotal_values, v.toString(), "req_total enum"sv)); break; case Root::Aggregations: checkJsonValueType(v, name, JSON_ARRAY); diff --git a/cpp_src/core/query/query.cc b/cpp_src/core/query/query.cc index 2e7b1da3e..49b6f3dec 100644 --- a/cpp_src/core/query/query.cc +++ b/cpp_src/core/query/query.cc @@ -13,18 +13,15 @@ using namespace std::string_view_literals; const std::string_view kLsnIndexName = "#lsn"sv; const std::string_view kSlaveVersionIndexName = "#slave_version"sv; -Query::Query(const std::string &__namespace, unsigned _start, unsigned _count, CalcTotalMode _calcTotal) - : _namespace(__namespace), start(_start), count(_count), calcTotal(_calcTotal) {} - bool Query::operator==(const Query &obj) const { if (entries != obj.entries) return false; if (aggregations_ != obj.aggregations_) return false; - if (_namespace != obj._namespace) return false; + if (NsName() != obj.NsName()) return false; if (sortingEntries_ != obj.sortingEntries_) return false; - if (calcTotal != obj.calcTotal) return false; - if (start != obj.start) return false; - if (count != obj.count) return false; + if (CalcTotal() != obj.CalcTotal()) return false; + if (Offset() != obj.Offset()) return false; + if (Limit() != obj.Limit()) return false; if (debugLevel != obj.debugLevel) return false; if (strictMode != obj.strictMode) return false; if (forcedSortOrder_.size() != obj.forcedSortOrder_.size()) return false; @@ -52,17 +49,6 @@ Error Query::FromJSON(const std::string &dsl) { return dsl::Parse(dsl, *this); } std::string Query::GetJSON() const { return dsl::toDsl(*this); } -Query &Query::SetObject(std::string field, VariantArray value, bool hasExpressions) & { - for (auto &it : value) { - if (!it.Type().Is()) { - throw Error(errLogic, "Unexpected variant type in SetObject: %s. 
Expecting KeyValueType::String with JSON-content", - it.Type().Name()); - } - } - updateFields_.emplace_back(std::move(field), std::move(value), FieldModeSetJson, hasExpressions); - return *this; -} - WrSerializer &Query::GetSQL(WrSerializer &ser, bool stripArgs) const { return SQLEncoder(*this).GetSQL(ser, stripArgs); } std::string Query::GetSQL(bool stripArgs) const { @@ -70,6 +56,11 @@ std::string Query::GetSQL(bool stripArgs) const { return std::string(GetSQL(ser, stripArgs).Slice()); } +std::string Query::GetSQL(QueryType realType) const { + WrSerializer ser; + return std::string(SQLEncoder(*this, realType).GetSQL(ser, false).Slice()); +} + void Query::deserialize(Serializer &ser, bool &hasJoinConditions) { bool end = false; std::vector> equalPositions; @@ -77,12 +68,12 @@ void Query::deserialize(Serializer &ser, bool &hasJoinConditions) { int qtype = ser.GetVarUint(); switch (qtype) { case QueryCondition: { - QueryEntry qe; - qe.index = std::string(ser.GetVString()); - OpType op = OpType(ser.GetVarUint()); - qe.condition = CondType(ser.GetVarUint()); + const auto fieldName = ser.GetVString(); + const OpType op = OpType(ser.GetVarUint()); + const CondType condition = CondType(ser.GetVarUint()); int cnt = ser.GetVarUint(); - if (qe.condition == CondDWithin) { + VariantArray values; + if (condition == CondDWithin) { if (cnt != 3) { throw Error(errParseBin, "Expected point and distance for DWithin"); } @@ -90,14 +81,14 @@ void Query::deserialize(Serializer &ser, bool &hasJoinConditions) { point.reserve(2); point.emplace_back(ser.GetVariant().EnsureHold()); point.emplace_back(ser.GetVariant().EnsureHold()); - qe.values.reserve(2); - qe.values.emplace_back(std::move(point)); - qe.values.emplace_back(ser.GetVariant().EnsureHold()); + values.reserve(2); + values.emplace_back(std::move(point)); + values.emplace_back(ser.GetVariant().EnsureHold()); } else { - qe.values.reserve(cnt); - while (cnt--) qe.values.emplace_back(ser.GetVariant().EnsureHold()); + values.reserve(cnt); + while (cnt--) values.emplace_back(ser.GetVariant().EnsureHold()); } - entries.Append(op, std::move(qe)); + entries.Append(op, std::string{fieldName}, condition, std::move(values)); break; } case QueryBetweenFieldsCondition: { @@ -105,12 +96,12 @@ void Query::deserialize(Serializer &ser, bool &hasJoinConditions) { std::string firstField{ser.GetVString()}; CondType condition = static_cast(ser.GetVarUint()); std::string secondField{ser.GetVString()}; - entries.Append(op, BetweenFieldsQueryEntry{std::move(firstField), condition, std::move(secondField)}); + entries.Append(op, std::move(firstField), condition, std::move(secondField)); break; } case QueryAlwaysFalseCondition: { const OpType op = OpType(ser.GetVarUint()); - entries.Append(op, AlwaysFalse{}); + entries.Append(op); break; } case QueryJoinCondition: { @@ -155,12 +146,9 @@ void Query::deserialize(Serializer &ser, bool &hasJoinConditions) { break; } case QueryDistinct: { - QueryEntry qe; - qe.index = std::string(ser.GetVString()); - if (!qe.index.empty()) { - qe.distinct = true; - qe.condition = CondAny; - entries.Append(OpAnd, std::move(qe)); + const auto fieldName = ser.GetVString(); + if (!fieldName.empty()) { + entries.Append(OpAnd, std::string{fieldName}, QueryEntry::DistinctTag{}); } break; } @@ -180,12 +168,12 @@ void Query::deserialize(Serializer &ser, bool &hasJoinConditions) { break; } case QueryJoinOn: { - QueryJoinEntry qje; - qje.op_ = OpType(ser.GetVarUint()); - qje.condition_ = CondType(ser.GetVarUint()); - qje.index_ = std::string(ser.GetVString()); - 
qje.joinIndex_ = std::string(ser.GetVString()); - reinterpret_cast(this)->joinEntries_.push_back(std::move(qje)); + const OpType op = static_cast(ser.GetVarUint()); + const CondType condition = static_cast(ser.GetVarUint()); + std::string leftFieldName{ser.GetVString()}; + std::string rightFieldName{ser.GetVString()}; + reinterpret_cast(this)->joinEntries_.emplace_back(op, condition, std::move(leftFieldName), + std::move(rightFieldName)); break; } case QueryDebugLevel: @@ -195,13 +183,13 @@ void Query::deserialize(Serializer &ser, bool &hasJoinConditions) { strictMode = StrictMode(ser.GetVarUint()); break; case QueryLimit: - count = ser.GetVarUint(); + count_ = ser.GetVarUint(); break; case QueryOffset: - start = ser.GetVarUint(); + start_ = ser.GetVarUint(); break; case QueryReqTotal: - calcTotal = CalcTotalMode(ser.GetVarUint()); + calcTotal_ = CalcTotalMode(ser.GetVarUint()); break; case QuerySelectFilter: selectFilter_.push_back(std::string(ser.GetVString())); @@ -291,7 +279,7 @@ void Query::deserialize(Serializer &ser, bool &hasJoinConditions) { } void Query::Serialize(WrSerializer &ser, uint8_t mode) const { - ser.PutVString(_namespace); + ser.PutVString(NsName()); entries.Serialize(ser); for (const auto &agg : aggregations_) { @@ -328,10 +316,10 @@ void Query::Serialize(WrSerializer &ser, uint8_t mode) const { if (mode & WithJoinEntries) { for (const auto &qje : reinterpret_cast(this)->joinEntries_) { ser.PutVarUint(QueryJoinOn); - ser.PutVarUint(qje.op_); - ser.PutVarUint(qje.condition_); - ser.PutVString(qje.index_); - ser.PutVString(qje.joinIndex_); + ser.PutVarUint(qje.Operation()); + ser.PutVarUint(qje.Condition()); + ser.PutVString(qje.LeftFieldName()); + ser.PutVString(qje.RightFieldName()); } } @@ -364,17 +352,17 @@ void Query::Serialize(WrSerializer &ser, uint8_t mode) const { if (!(mode & SkipLimitOffset)) { if (HasLimit()) { ser.PutVarUint(QueryLimit); - ser.PutVarUint(count); + ser.PutVarUint(Limit()); } if (HasOffset()) { ser.PutVarUint(QueryOffset); - ser.PutVarUint(start); + ser.PutVarUint(Offset()); } } - if (calcTotal != ModeNoTotal) { + if (CalcTotal() != ModeNoTotal) { ser.PutVarUint(QueryReqTotal); - ser.PutVarUint(calcTotal); + ser.PutVarUint(CalcTotal()); } for (const auto &sf : selectFilter_) { @@ -426,7 +414,7 @@ void Query::Serialize(WrSerializer &ser, uint8_t mode) const { } void Query::Deserialize(Serializer &ser) { - _namespace = std::string(ser.GetVString()); + namespace_ = std::string(ser.GetVString()); bool hasJoinConditions = false; deserialize(ser, hasJoinConditions); @@ -445,37 +433,27 @@ void Query::Deserialize(Serializer &ser) { Query &q = nested ? mergeQueries_.back() : *this; if (joinType != JoinType::LeftJoin && !hasJoinConditions) { const size_t joinIdx = joinQueries_.size(); - entries.Append((joinType == JoinType::OrInnerJoin) ? OpOr : OpAnd, JoinQueryEntry{joinIdx}); + entries.Append((joinType == JoinType::OrInnerJoin) ? 
OpOr : OpAnd, joinIdx); } q.joinQueries_.emplace_back(std::move(q1)); } } } -Query &Query::Join(JoinType joinType, const std::string &index, const std::string &joinIndex, CondType cond, OpType op, Query &&qr) & { - QueryJoinEntry joinEntry; - joinEntry.op_ = op; - joinEntry.condition_ = cond; - joinEntry.index_ = index; - joinEntry.joinIndex_ = joinIndex; +Query &Query::Join(JoinType joinType, std::string leftField, std::string rightField, CondType cond, OpType op, Query &&qr) & { auto &jq = joinQueries_.emplace_back(joinType, std::move(qr)); - jq.joinEntries_.emplace_back(std::move(joinEntry)); + jq.joinEntries_.emplace_back(op, cond, std::move(leftField), std::move(rightField)); if (joinType != JoinType::LeftJoin) { - entries.Append((joinType == JoinType::InnerJoin) ? OpType::OpAnd : OpType::OpOr, JoinQueryEntry(joinQueries_.size() - 1)); + entries.Append((joinType == JoinType::InnerJoin) ? OpType::OpAnd : OpType::OpOr, joinQueries_.size() - 1); } return *this; } -Query &Query::Join(JoinType joinType, const std::string &index, const std::string &joinIndex, CondType cond, OpType op, const Query &qr) & { - QueryJoinEntry joinEntry; - joinEntry.op_ = op; - joinEntry.condition_ = cond; - joinEntry.index_ = index; - joinEntry.joinIndex_ = joinIndex; +Query &Query::Join(JoinType joinType, std::string leftField, std::string rightField, CondType cond, OpType op, const Query &qr) & { joinQueries_.emplace_back(joinType, qr); - joinQueries_.back().joinEntries_.emplace_back(std::move(joinEntry)); + joinQueries_.back().joinEntries_.emplace_back(op, cond, std::move(leftField), std::move(rightField)); if (joinType != JoinType::LeftJoin) { - entries.Append((joinType == JoinType::InnerJoin) ? OpType::OpAnd : OpType::OpOr, JoinQueryEntry(joinQueries_.size() - 1)); + entries.Append((joinType == JoinType::InnerJoin) ? OpType::OpAnd : OpType::OpOr, joinQueries_.size() - 1); } return *this; } @@ -483,7 +461,7 @@ Query &Query::Join(JoinType joinType, const std::string &index, const std::strin Query::OnHelper Query::Join(JoinType joinType, Query &&q) & { joinQueries_.emplace_back(joinType, std::move(q)); if (joinType != JoinType::LeftJoin) { - entries.Append((joinType == JoinType::InnerJoin) ? OpType::OpAnd : OpType::OpOr, JoinQueryEntry(joinQueries_.size() - 1)); + entries.Append((joinType == JoinType::InnerJoin) ? OpType::OpAnd : OpType::OpOr, joinQueries_.size() - 1); } return {*this, joinQueries_.back()}; } @@ -491,7 +469,7 @@ Query::OnHelper Query::Join(JoinType joinType, Query &&q) & { Query::OnHelper Query::Join(JoinType joinType, const Query &q) & { joinQueries_.emplace_back(joinType, q); if (joinType != JoinType::LeftJoin) { - entries.Append((joinType == JoinType::InnerJoin) ? OpType::OpAnd : OpType::OpOr, JoinQueryEntry(joinQueries_.size() - 1)); + entries.Append((joinType == JoinType::InnerJoin) ? OpType::OpAnd : OpType::OpOr, joinQueries_.size() - 1); } return {*this, joinQueries_.back()}; } @@ -499,7 +477,7 @@ Query::OnHelper Query::Join(JoinType joinType, const Query &q) & { Query::OnHelperR Query::Join(JoinType joinType, Query &&q) && { joinQueries_.emplace_back(joinType, std::move(q)); if (joinType != JoinType::LeftJoin) { - entries.Append((joinType == JoinType::InnerJoin) ? OpType::OpAnd : OpType::OpOr, JoinQueryEntry(joinQueries_.size() - 1)); + entries.Append((joinType == JoinType::InnerJoin) ? 
OpType::OpAnd : OpType::OpOr, joinQueries_.size() - 1); } return {std::move(*this), joinQueries_.back()}; } @@ -507,7 +485,7 @@ Query::OnHelperR Query::Join(JoinType joinType, Query &&q) && { Query::OnHelperR Query::Join(JoinType joinType, const Query &q) && { joinQueries_.emplace_back(joinType, q); if (joinType != JoinType::LeftJoin) { - entries.Append((joinType == JoinType::InnerJoin) ? OpType::OpAnd : OpType::OpOr, JoinQueryEntry(joinQueries_.size() - 1)); + entries.Append((joinType == JoinType::InnerJoin) ? OpType::OpAnd : OpType::OpOr, joinQueries_.size() - 1); } return {std::move(*this), joinQueries_.back()}; } @@ -548,11 +526,11 @@ void Query::WalkNested(bool withSelf, bool withMerged, const std::function(0) && kLsnIndexName == entries.Get(0).index) { + if (entries.Size() == 1 && entries.HoldsOrReferTo(0) && kLsnIndexName == entries.Get(0).FieldName()) { return true; } else if (entries.Size() == 2 && entries.HoldsOrReferTo(0) && entries.HoldsOrReferTo(1)) { - const auto &index0 = entries.Get(0).index; - const auto &index1 = entries.Get(1).index; + const auto &index0 = entries.Get(0).FieldName(); + const auto &index1 = entries.Get(1).FieldName(); return (kLsnIndexName == index0 && kSlaveVersionIndexName == index1) || (kLsnIndexName == index1 && kSlaveVersionIndexName == index0); } diff --git a/cpp_src/core/query/query.h b/cpp_src/core/query/query.h index 00b2b7203..355cd805d 100644 --- a/cpp_src/core/query/query.h +++ b/cpp_src/core/query/query.h @@ -71,8 +71,10 @@ class Query { /// @param start - number of the first row to get from selected set. Analog to sql OFFSET Offset. /// @param count - number of rows to get from result set. Analog to sql LIMIT RowsCount. /// @param calcTotal - calculation mode. - explicit Query(const std::string &nsName, unsigned start = QueryEntry::kDefaultOffset, unsigned count = QueryEntry::kDefaultLimit, - CalcTotalMode calcTotal = ModeNoTotal); + template + explicit Query(T nsName, unsigned start = QueryEntry::kDefaultOffset, unsigned count = QueryEntry::kDefaultLimit, + CalcTotalMode calcTotal = ModeNoTotal) + : namespace_(std::forward(nsName)), start_(start), count_(count), calcTotal_(calcTotal) {} /// Creates an empty object. Query() = default; @@ -94,6 +96,11 @@ class Query { /// @return Query in SQL format std::string GetSQL(bool stripArgs = false) const; + /// Logs query in 'Select field1, ... field N from namespace ...' format. + /// @param realType - replaces original query's type + /// @return Query in SQL format + std::string GetSQL(QueryType realType) const; + /// Parses JSON dsl set. /// @param dsl - dsl set. /// @return always returns errOk or throws an exception. @@ -112,79 +119,75 @@ class Query { Query &&Explain(bool on = true) && { return std::move(Explain(on)); } /// Adds a condition with a single value. Analog to sql Where clause. - /// @param idx - index used in condition clause. + /// @param field - field used in condition clause. /// @param cond - type of condition. /// @param val - value of index to be compared with. /// @return Query object ready to be executed. 
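For reference, the hunk above replaces Query's single std::string constructor with a forwarding template, so the namespace name no longer has to arrive as a ready-made std::string. A minimal sketch of what this is expected to allow, assuming the include path matches cpp_src/core/query/query.h and the constructor otherwise behaves as before (namespace names here are invented):

    #include <string>
    #include <string_view>
    #include "core/query/query.h"  // assumed include path for reindexer::Query

    void constructQueries() {
        using reindexer::Query;
        Query fromLiteral("items");                  // const char* forwards straight into the namespace member
        Query fromString(std::string("items"));      // std::string is moved, not copied
        Query fromView(std::string_view("items"));   // string_view should also work via std::string's converting ctor
    }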
- template - Query &Where(const std::string &idx, CondType cond, Input val) & { - return Where(idx, cond, {val}); + template + Query &Where(Str &&field, CondType cond, Input val) & { + return Where(std::forward(field), cond, {std::forward(val)}); } - template - Query &&Where(const std::string &idx, CondType cond, Input val) && { - return std::move(Where(idx, cond, {val})); + template + Query &&Where(Str &&field, CondType cond, Input val) && { + return std::move(Where(std::forward(field), cond, {std::move(val)})); } /// Adds a condition with several values. Analog to sql Where clause. - /// @param idx - index used in condition clause. + /// @param field - field used in condition clause. /// @param cond - type of condition. /// @param l - list of index values to be compared with. /// @return Query object ready to be executed. - template - Query &Where(const std::string &idx, CondType cond, std::initializer_list l) & { - QueryEntry qe; - qe.condition = cond; - qe.index = idx; - for (auto it = l.begin(); it != l.end(); it++) qe.values.push_back(Variant(*it)); - entries.Append(nextOp_, std::move(qe)); + template + Query &Where(Str &&field, CondType cond, std::initializer_list l) & { + VariantArray values; + values.reserve(l.size()); + for (auto it = l.begin(); it != l.end(); it++) values.emplace_back(*it); + entries.Append(nextOp_, std::forward(field), cond, std::move(values)); nextOp_ = OpAnd; return *this; } - template - Query &&Where(const std::string &idx, CondType cond, std::initializer_list l) && { - return std::move(Where(idx, cond, std::move(l))); + template + Query &&Where(Str &&field, CondType cond, std::initializer_list l) && { + return std::move(Where(std::forward(field), cond, std::move(l))); } /// Adds a condition with several values. Analog to sql Where clause. - /// @param idx - index used in condition clause. + /// @param field - field used in condition clause. /// @param cond - type of condition. /// @param l - vector of index values to be compared with. /// @return Query object ready to be executed. - template - Query &Where(const std::string &idx, CondType cond, const std::vector &l) & { - QueryEntry qe; - qe.condition = cond; - qe.index = idx; - qe.values.reserve(l.size()); - for (auto it = l.begin(); it != l.end(); it++) qe.values.push_back(Variant(*it)); - entries.Append(nextOp_, std::move(qe)); + template + Query &Where(Str &&field, CondType cond, const std::vector &l) & { + VariantArray values; + values.reserve(l.size()); + for (auto it = l.begin(); it != l.end(); it++) values.emplace_back(*it); + entries.Append(nextOp_, std::forward(field), cond, std::move(values)); nextOp_ = OpAnd; return *this; } - template - Query &&Where(const std::string &idx, CondType cond, const std::vector &l) && { - return std::move(Where(idx, cond, l)); + template + Query &&Where(Str &&field, CondType cond, const std::vector &l) && { + return std::move(Where(std::forward(field), cond, l)); } /// Adds a condition with several values. Analog to sql Where clause. - /// @param idx - index used in condition clause. + /// @param field - field used in condition clause. /// @param cond - type of condition. /// @param l - vector of index values to be compared with. /// @return Query object ready to be executed. 
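A hypothetical call site for the reworked Where overloads above, now that field names are taken as forwarding references. The namespace, field names and values are invented; condition constants such as CondEq, CondRange and CondSet, and Variant's conversions from literals, are assumed to keep their existing behaviour:

    #include <vector>
    #include "core/query/query.h"  // assumed include path

    reindexer::Query buildFilter() {
        reindexer::Query q("books");
        q.Where("genre", CondEq, "sci-fi")                          // single-value overload, field passed as a literal
            .Where("year", CondRange, {1990, 2005})                 // initializer_list overload
            .Where("price", CondSet, std::vector<int>{100, 300});   // std::vector overload
        return q;
    }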
- Query &Where(const std::string &idx, CondType cond, const VariantArray &l) & { - QueryEntry qe; - qe.condition = cond; - qe.index = idx; - qe.values.reserve(l.size()); - for (auto it = l.begin(); it != l.end(); it++) qe.values.push_back(Variant(*it)); - entries.Append(nextOp_, std::move(qe)); + template + Query &Where(Str &&field, CondType cond, VariantArray l) & { + entries.Append(nextOp_, std::forward(field), cond, std::move(l)); nextOp_ = OpAnd; return *this; } - Query &&Where(const std::string &idx, CondType cond, const VariantArray &l) && { return std::move(Where(idx, cond, l)); } + template + Query &&Where(Str &&field, CondType cond, VariantArray l) && { + return std::move(Where(std::forward(field), cond, std::move(l))); + } /// Adds a condition with several values to a composite index. - /// @param idx - index name. + /// @param idx - composite index name. /// @param cond - type of condition. /// @param l - list of values to be compared according to the order /// of indexes in composite index name. @@ -194,172 +197,192 @@ class Query { /// in case of CondRange) belongs to "bookid" and l[0][1] (and l[1][1] in case of CondRange) /// belongs to "price" indexes. /// @return Query object ready to be executed. - Query &WhereComposite(const std::string &idx, CondType cond, std::initializer_list l) & { - QueryEntry qe; - qe.condition = cond; - qe.index = idx; - qe.values.reserve(l.size()); + template + Query &WhereComposite(Str &&idx, CondType cond, std::initializer_list l) & { + VariantArray values; + values.reserve(l.size()); for (auto it = l.begin(); it != l.end(); it++) { - qe.values.push_back(Variant(*it)); + values.emplace_back(*it); } - entries.Append(nextOp_, std::move(qe)); + entries.Append(nextOp_, std::forward(idx), cond, std::move(values)); nextOp_ = OpAnd; return *this; } - Query &&WhereComposite(const std::string &idx, CondType cond, std::initializer_list l) && { - return std::move(WhereComposite(idx, cond, l)); + template + Query &&WhereComposite(Str &&idx, CondType cond, std::initializer_list l) && { + return std::move(WhereComposite(std::forward(idx), cond, std::move(l))); } - Query &WhereComposite(const std::string &idx, CondType cond, const std::vector &v) & { - QueryEntry qe; - qe.condition = cond; - qe.index = idx; - qe.values.reserve(v.size()); + template + Query &WhereComposite(Str &&idx, CondType cond, const std::vector &v) & { + VariantArray values; + values.reserve(v.size()); for (auto it = v.begin(); it != v.end(); it++) { - qe.values.push_back(Variant(*it)); + values.emplace_back(*it); } - entries.Append(nextOp_, std::move(qe)); + entries.Append(nextOp_, std::forward(idx), cond, std::move(values)); nextOp_ = OpAnd; return *this; } - Query &&WhereComposite(const std::string &idx, CondType cond, const std::vector &v) && { - return std::move(WhereComposite(idx, cond, v)); + template + Query &&WhereComposite(Str &&idx, CondType cond, const std::vector &v) && { + return std::move(WhereComposite(std::forward(idx), cond, v)); } - Query &WhereBetweenFields(std::string firstIdx, CondType cond, std::string secondIdx) & { - entries.Append(nextOp_, BetweenFieldsQueryEntry{std::move(firstIdx), cond, std::move(secondIdx)}); + template + Query &WhereBetweenFields(Str1 &&firstIdx, CondType cond, Str2 &&secondIdx) & { + entries.Append(nextOp_, std::forward(firstIdx), cond, std::forward(secondIdx)); nextOp_ = OpAnd; return *this; } - Query &&WhereBetweenFields(std::string firstIdx, CondType cond, std::string secondIdx) && { - return 
std::move(WhereBetweenFields(std::move(firstIdx), cond, std::move(secondIdx))); + template + Query &&WhereBetweenFields(Str1 &&firstIdx, CondType cond, Str2 &&secondIdx) && { + return std::move(WhereBetweenFields(std::forward(firstIdx), cond, std::forward(secondIdx))); } - Query &DWithin(const std::string &idx, Point p, double distance) & { - QueryEntry qe; - qe.condition = CondDWithin; - qe.index = idx; - qe.values.reserve(2); - qe.values.emplace_back(p); - qe.values.emplace_back(distance); - entries.Append(nextOp_, std::move(qe)); + template + Query &DWithin(Str &&field, Point p, double distance) & { + entries.Append(nextOp_, std::forward(field), CondDWithin, VariantArray::Create(p, distance)); nextOp_ = OpAnd; return *this; } - Query &&DWithin(const std::string &idx, Point p, double distance) && { return std::move(DWithin(idx, p, distance)); } + template + Query &&DWithin(Str &&field, Point p, double distance) && { + return std::move(DWithin(std::forward(field), p, distance)); + } /// Sets a new value for a field. /// @param field - field name. /// @param value - new value. /// @param hasExpressions - true: value has expresions in it - template - Query &Set(std::string field, ValueType value, bool hasExpressions = false) & { - return Set(std::move(field), {value}, hasExpressions); + template + Query &Set(Str &&field, ValueType value, bool hasExpressions = false) & { + return Set(std::forward(field), {value}, hasExpressions); } - template - Query &&Set(std::string field, ValueType value, bool hasExpressions = false) && { - return std::move(Set(std::move(field), std::move(value), hasExpressions)); + template + Query &&Set(Str &&field, ValueType value, bool hasExpressions = false) && { + return std::move(Set(std::forward(field), std::move(value), hasExpressions)); } /// Sets a new value for a field. /// @param field - field name. /// @param l - new value. /// @param hasExpressions - true: value has expresions in it - template - Query &Set(std::string field, std::initializer_list l, bool hasExpressions = false) & { + template + Query &Set(Str &&field, std::initializer_list l, bool hasExpressions = false) & { VariantArray value; value.reserve(l.size()); for (auto it = l.begin(); it != l.end(); it++) value.emplace_back(*it); - return Set(std::move(field), std::move(value), hasExpressions); + return Set(std::forward(field), std::move(value), hasExpressions); } - template - Query &&Set(std::string field, std::initializer_list l, bool hasExpressions = false) && { - return std::move(Set(std::move(field), std::move(l), hasExpressions)); + template + Query &&Set(Str &&field, std::initializer_list l, bool hasExpressions = false) && { + return std::move(Set(std::forward(field), std::move(l), hasExpressions)); } /// Sets a new value for a field. /// @param field - field name. /// @param l - new value. 
/// @param hasExpressions - true: value has expresions in it - template - Query &Set(std::string field, const std::vector &l, bool hasExpressions = false) & { + template + Query &Set(Str &&field, const std::vector &l, bool hasExpressions = false) & { VariantArray value; value.reserve(l.size()); for (auto it = l.begin(); it != l.end(); it++) value.emplace_back(*it); - return Set(std::move(field), std::move(value.MarkArray()), hasExpressions); + return Set(std::forward(field), std::move(value.MarkArray()), hasExpressions); } - template - Query &&Set(std::string field, const std::vector &l, bool hasExpressions = false) && { - return std::move(Set(std::move(field), l, hasExpressions)); + template + Query &&Set(Str &&field, const std::vector &l, bool hasExpressions = false) && { + return std::move(Set(std::forward(field), l, hasExpressions)); } /// Sets a new value for a field. /// @param field - field name. /// @param value - new value. /// @param hasExpressions - true: value has expresions in it - Query &Set(std::string field, VariantArray value, bool hasExpressions = false) & { - updateFields_.emplace_back(std::move(field), std::move(value), FieldModeSet, hasExpressions); + template + Query &Set(Str &&field, VariantArray value, bool hasExpressions = false) & { + updateFields_.emplace_back(std::forward(field), std::move(value), FieldModeSet, hasExpressions); return *this; } - Query &&Set(std::string field, VariantArray value, bool hasExpressions = false) && { - return std::move(Set(std::move(field), std::move(value), hasExpressions)); + template + Query &&Set(Str &&field, VariantArray value, bool hasExpressions = false) && { + return std::move(Set(std::forward(field), std::move(value), hasExpressions)); } /// Sets a value for a field as an object. /// @param field - field name. /// @param value - new value. /// @param hasExpressions - true: value has expresions in it - template - Query &SetObject(std::string field, ValueType value, bool hasExpressions = false) & { - return SetObject(std::move(field), {value}, hasExpressions); + template + Query &SetObject(Str &&field, ValueType value, bool hasExpressions = false) & { + return SetObject(std::forward(field), {value}, hasExpressions); } - template - Query &&SetObject(std::string field, ValueType value, bool hasExpressions = false) && { - return std::move(SetObject(std::move(field), std::move(value), hasExpressions)); + template + Query &&SetObject(Str &&field, ValueType value, bool hasExpressions = false) && { + return std::move(SetObject(std::forward(field), std::move(value), hasExpressions)); } /// Sets a new value for a field as an object. /// @param field - field name. /// @param l - new value. 
/// @param hasExpressions - true: value has expresions in it - template - Query &SetObject(std::string field, std::initializer_list l, bool hasExpressions = false) & { + template + Query &SetObject(Str &&field, std::initializer_list l, bool hasExpressions = false) & { VariantArray value; value.reserve(l.size()); - for (auto it = l.begin(); it != l.end(); it++) value.emplace_back(Variant(*it)); - return SetObject(std::move(field), std::move(value), hasExpressions); + for (auto it = l.begin(); it != l.end(); it++) value.emplace_back(*it); + return SetObject(std::forward(field), std::move(value), hasExpressions); } - template - Query &&SetObject(std::string field, std::initializer_list l, bool hasExpressions = false) && { - return std::move(SetObject(std::move(field), std::move(l), hasExpressions)); + template + Query &&SetObject(Str &&field, std::initializer_list l, bool hasExpressions = false) && { + return std::move(SetObject(std::forward(field), std::move(l), hasExpressions)); } /// Sets a new value for a field as an object. /// @param field - field name. /// @param l - new value. /// @param hasExpressions - true: value has expresions in it - template - Query &SetObject(std::string field, const std::vector &l, bool hasExpressions = false) & { + template + Query &SetObject(Str &&field, const std::vector &l, bool hasExpressions = false) & { VariantArray value; value.reserve(l.size()); for (auto it = l.begin(); it != l.end(); it++) value.emplace_back(Variant(*it)); - return SetObject(std::move(field), std::move(value.MarkArray()), hasExpressions); + return SetObject(std::forward(field), std::move(value.MarkArray()), hasExpressions); } - template - Query &&SetObject(std::string field, const std::vector &l, bool hasExpressions = false) && { - return std::move(SetObject(std::move(field), l, hasExpressions)); + template + Query &&SetObject(Str &&field, const std::vector &l, bool hasExpressions = false) && { + return std::move(SetObject(std::forward(field), l, hasExpressions)); } /// Sets a value for a field as an object. /// @param field - field name. /// @param value - new value. /// @param hasExpressions - true: value has expresions in it - Query &SetObject(std::string field, VariantArray value, bool hasExpressions = false) &; - Query &&SetObject(std::string field, VariantArray value, bool hasExpressions = false) && { - return std::move(SetObject(std::move(field), std::move(value), hasExpressions)); + template + Query &SetObject(Str &&field, VariantArray value, bool hasExpressions = false) & { + for (auto &it : value) { + if (!it.Type().Is()) { + throw Error(errLogic, "Unexpected variant type in SetObject: %s. Expecting KeyValueType::String with JSON-content", + it.Type().Name()); + } + } + updateFields_.emplace_back(std::forward(field), std::move(value), FieldModeSetJson, hasExpressions); + return *this; + } + template + Query &&SetObject(Str &&field, VariantArray value, bool hasExpressions = false) && { + return std::move(SetObject(std::forward(field), std::move(value), hasExpressions)); } /// Drops a value for a field. /// @param field - field name. 
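A sketch of how the update helpers above might be chained once they accept forwarded field names. Field names and values are invented, and the SetObject line assumes a Variant can be constructed from a std::string holding JSON, which is exactly what the inlined overload above checks for (KeyValueType::String):

    #include <string>
    #include "core/query/query.h"  // assumed include path

    reindexer::Query buildUpdate() {
        reindexer::Query upd("items");
        upd.Where("id", CondEq, 42)
            .Set("price", 999)                                                        // scalar field update
            .SetObject("details", reindexer::Variant(std::string(R"({"qty":5})")))    // JSON payload must be a string variant
            .Drop("obsolete_field");                                                  // schedule field removal
        return upd;
    }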
- Query &Drop(std::string field) & { - updateFields_.emplace_back(std::move(field), VariantArray(), FieldModeDrop); + template + Query &Drop(Str &&field) & { + updateFields_.emplace_back(std::forward(field), VariantArray(), FieldModeDrop); return *this; } - Query &&Drop(std::string field) && { return std::move(Drop(std::move(field))); } + template + Query &&Drop(Str &&field) && { + return std::move(Drop(std::forward(field))); + } /// Add sql-function to query. /// @param function - function declaration. - void AddFunction(std::string function) { selectFunctions_.emplace_back(std::move(function)); } + template + void AddFunction(Str &&function) { + selectFunctions_.emplace_back(std::forward(function)); + } /// Adds equal position fields to arrays queries. /// @param equalPosition - list of fields with equal array index position. @@ -387,19 +410,21 @@ class Query { /// Joins namespace with another namespace. Analog to sql JOIN. /// @param joinType - type of Join (Inner, Left or OrInner). - /// @param index - name of the field in the namespace of this Query object. - /// @param joinIndex - name of the field in the namespace of qr Query object. + /// @param leftField - name of the field in the namespace of this Query object. + /// @param rightField - name of the field in the namespace of qr Query object. /// @param cond - condition type (Eq, Leq, Geq, etc). /// @param op - operation type (and, or, not). /// @param qr - query of the namespace that is going to be joined with this one. /// @return Query object ready to be executed. - Query &Join(JoinType joinType, const std::string &index, const std::string &joinIndex, CondType cond, OpType op, Query &&qr) &; - Query &&Join(JoinType joinType, const std::string &index, const std::string &joinIndex, CondType cond, OpType op, Query &&qr) && { - return std::move(Join(joinType, index, joinIndex, cond, op, std::move(qr))); + Query &Join(JoinType joinType, std::string leftField, std::string rightField, CondType cond, OpType op, Query &&qr) &; + template + Query &&Join(JoinType joinType, StrL &&leftField, StrR &&rightField, CondType cond, OpType op, Query &&qr) && { + return std::move(Join(joinType, std::forward(leftField), std::forward(rightField), cond, op, std::move(qr))); } - Query &Join(JoinType joinType, const std::string &index, const std::string &joinIndex, CondType cond, OpType op, const Query &qr) &; - Query &&Join(JoinType joinType, const std::string &index, const std::string &joinIndex, CondType cond, OpType op, const Query &qr) && { - return std::move(Join(joinType, index, joinIndex, cond, op, qr)); + Query &Join(JoinType joinType, std::string leftField, std::string rightField, CondType cond, OpType op, const Query &qr) &; + template + Query &&Join(JoinType joinType, StrL &&leftField, StrR &&rightField, CondType cond, OpType op, const Query &qr) && { + return std::move(Join(joinType, std::forward(leftField), std::forward(rightField), cond, op, qr)); } OnHelper Join(JoinType joinType, Query &&q) &; @@ -409,60 +434,72 @@ class Query { /// @public /// Inner Join of this namespace with another one. - /// @param index - name of the field in the namespace of this Query object. - /// @param joinIndex - name of the field in the namespace of qr Query object. + /// @param leftField - name of the field in the namespace of this Query object. + /// @param rightField - name of the field in the namespace of qr Query object. /// @param cond - condition type (Eq, Leq, Geq, etc). /// @param qr - query of the namespace that is going to be joined with this one. 
/// @return Query object ready to be executed. - Query &InnerJoin(const std::string &index, const std::string &joinIndex, CondType cond, Query &&qr) & { // -V1071 - return Join(JoinType::InnerJoin, index, joinIndex, cond, OpAnd, std::move(qr)); + template + Query &InnerJoin(StrL &&leftField, StrR &&rightField, CondType cond, Query &&qr) & { // -V1071 + return Join(JoinType::InnerJoin, std::forward(leftField), std::forward(rightField), cond, OpAnd, std::move(qr)); } - Query &&InnerJoin(const std::string &index, const std::string &joinIndex, CondType cond, Query &&qr) && { - return std::move(InnerJoin(index, joinIndex, cond, std::move(qr))); + template + Query &&InnerJoin(StrL &&leftField, StrR &&rightField, CondType cond, Query &&qr) && { + return std::move(InnerJoin(std::forward(leftField), std::forward(rightField), cond, std::move(qr))); } - Query &InnerJoin(const std::string &index, const std::string &joinIndex, CondType cond, const Query &qr) & { - return Join(JoinType::InnerJoin, index, joinIndex, cond, OpAnd, qr); + template + Query &InnerJoin(StrL &&leftField, StrR &&rightField, CondType cond, const Query &qr) & { + return Join(JoinType::InnerJoin, std::forward(leftField), std::forward(rightField), cond, OpAnd, qr); } - Query &&InnerJoin(const std::string &index, const std::string &joinIndex, CondType cond, const Query &qr) && { - return std::move(InnerJoin(index, joinIndex, cond, qr)); + template + Query &&InnerJoin(StrL &&leftField, StrR &&rightField, CondType cond, const Query &qr) && { + return std::move(InnerJoin(std::forward(leftField), std::forward(rightField), cond, qr)); } /// Left Join of this namespace with another one. - /// @param index - name of the field in the namespace of this Query object. - /// @param joinIndex - name of the field in the namespace of qr Query object. + /// @param leftField - name of the field in the namespace of this Query object. + /// @param rightField - name of the field in the namespace of qr Query object. /// @param cond - condition type (Eq, Leq, Geq, etc). /// @param qr - query of the namespace that is going to be joined with this one. /// @return Query object ready to be executed. 
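A hypothetical use of the templated InnerJoin overloads above; namespaces and field names are invented. The left field belongs to the outer query's namespace and the right field to the joined one, as the renamed parameters now make explicit:

    #include "core/query/query.h"  // assumed include path

    reindexer::Query ordersWithActiveUsers() {
        using reindexer::Query;
        Query orders("orders");
        orders.InnerJoin("user_id", "id", CondEq,
                         Query("users").Where("is_active", CondEq, true))
            .Limit(100);
        return orders;
    }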
- Query &LeftJoin(const std::string &index, const std::string &joinIndex, CondType cond, Query &&qr) & { - return Join(JoinType::LeftJoin, index, joinIndex, cond, OpAnd, std::move(qr)); + template + Query &LeftJoin(StrL &&leftField, StrR &&rightField, CondType cond, Query &&qr) & { + return Join(JoinType::LeftJoin, std::forward(leftField), std::forward(rightField), cond, OpAnd, std::move(qr)); } - Query &&LeftJoin(const std::string &index, const std::string &joinIndex, CondType cond, Query &&qr) && { - return std::move(LeftJoin(index, joinIndex, cond, std::move(qr))); + template + Query &&LeftJoin(StrL &&leftField, StrR &&rightField, CondType cond, Query &&qr) && { + return std::move(LeftJoin(std::forward(leftField), std::forward(rightField), cond, std::move(qr))); } - Query &LeftJoin(const std::string &index, const std::string &joinIndex, CondType cond, const Query &qr) & { - return Join(JoinType::LeftJoin, index, joinIndex, cond, OpAnd, qr); + template + Query &LeftJoin(StrL &&leftField, StrR &&rightField, CondType cond, const Query &qr) & { + return Join(JoinType::LeftJoin, std::forward(leftField), std::forward(rightField), cond, OpAnd, qr); } - Query &&LeftJoin(const std::string &index, const std::string &joinIndex, CondType cond, const Query &qr) && { - return std::move(LeftJoin(index, joinIndex, cond, qr)); + template + Query &&LeftJoin(StrL &&leftField, StrR &&rightField, CondType cond, const Query &qr) && { + return std::move(LeftJoin(std::forward(leftField), std::forward(rightField), cond, qr)); } /// OrInnerJoin of this namespace with another one. - /// @param index - name of the field in the namespace of this Query object. - /// @param joinIndex - name of the field in the namespace of qr Query object. + /// @param leftField - name of the field in the namespace of this Query object. + /// @param rightField - name of the field in the namespace of qr Query object. /// @param cond - condition type (Eq, Leq, Geq, etc). /// @param qr - query of the namespace that is going to be joined with this one. /// @return a reference to a query object ready to be executed. 
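The same pattern for LeftJoin. As the `joinType != JoinType::LeftJoin` guards in query.cc above show, a left join only records the joined query and does not append an entry to the condition tree, so it never filters the outer namespace. Names below are invented:

    #include "core/query/query.h"  // assumed include path

    reindexer::Query ordersWithOptionalUsers() {
        using reindexer::Query;
        Query orders("orders");
        orders.LeftJoin("user_id", "id", CondEq, Query("users"));  // joined items are attached, outer rows are not filtered
        return orders;
    }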
- Query &OrInnerJoin(const std::string &index, const std::string &joinIndex, CondType cond, Query &&qr) & { - return Join(JoinType::OrInnerJoin, index, joinIndex, cond, OpAnd, std::move(qr)); + template + Query &OrInnerJoin(StrL &&leftField, StrR &&rightField, CondType cond, Query &&qr) & { + return Join(JoinType::OrInnerJoin, std::forward(leftField), std::forward(rightField), cond, OpAnd, std::move(qr)); } - Query &&OrInnerJoin(const std::string &index, const std::string &joinIndex, CondType cond, Query &&qr) && { - return std::move(OrInnerJoin(index, joinIndex, cond, std::move(qr))); + template + Query &&OrInnerJoin(StrL &&leftField, StrR &&rightField, CondType cond, Query &&qr) && { + return std::move(OrInnerJoin(std::forward(leftField), std::forward(rightField), cond, std::move(qr))); } - Query &OrInnerJoin(const std::string &index, const std::string &joinIndex, CondType cond, const Query &qr) & { - return Join(JoinType::OrInnerJoin, index, joinIndex, cond, OpAnd, qr); + template + Query &OrInnerJoin(StrL &&leftField, StrR &&rightField, CondType cond, const Query &qr) & { + return Join(JoinType::OrInnerJoin, std::forward(leftField), std::forward(rightField), cond, OpAnd, qr); } - Query &&OrInnerJoin(const std::string &index, const std::string &joinIndex, CondType cond, const Query &qr) && { - return std::move(OrInnerJoin(index, joinIndex, cond, qr)); + template + Query &&OrInnerJoin(StrL &&leftField, StrR &&rightField, CondType cond, const Query &qr) && { + return std::move(OrInnerJoin(std::forward(leftField), std::forward(rightField), cond, qr)); } Query &Merge(const Query &q) &; Query &&Merge(const Query &q) && { return std::move(Merge(q)); } @@ -491,11 +528,15 @@ class Query { /// @param sort - sorting column name. /// @param desc - is sorting direction descending or ascending. /// @return Query object. - Query &Sort(std::string sort, bool desc) & { // -V1071 - if (sort.length()) sortingEntries_.emplace_back(std::move(sort), desc); + template + Query &Sort(Str &&sort, bool desc) & { // -V1071 + if (!strEmpty(sort)) sortingEntries_.emplace_back(std::forward(sort), desc); return *this; } - Query &&Sort(std::string sort, bool desc) && { return std::move(Sort(std::move(sort), desc)); } + template + Query &&Sort(Str &&sort, bool desc) && { + return std::move(Sort(std::forward(sort), desc)); + } /// Performs sorting by ST_Distance() expressions for geometry index. Sorting function will use distance between field and target point. /// @param field - field's name. This field must contain Point. @@ -519,17 +560,17 @@ class Query { /// @param desc - is sorting direction descending or ascending. /// @param forcedSortOrder - list of values for forced sort order. /// @return Query object. 
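// Illustrative sketch (not from this patch): the forced-sort-order overload only
// accepts forced values for the first sorting entry, so it has to be the first
// Sort() call; later Sort() calls without forced values are fine. Field names are
// assumptions, and the int literals assume Variant is constructible from them:
//
//   reindexer::Query q{"items"};
//   q.Sort("year", false, {2020, 2021, 2022});  // forced order: must come first
//   q.Sort("name", false);                      // additional plain sorting entry
//
// Calling the two in the opposite order throws the errParams error shown above.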
- template - Query &Sort(std::string sort, bool desc, std::initializer_list forcedSortOrder) & { + template + Query &Sort(Str &&sort, bool desc, std::initializer_list forcedSortOrder) & { if (!sortingEntries_.empty() && !std::empty(forcedSortOrder)) throw Error(errParams, "Forced sort order is allowed for the first sorting entry only"); - sortingEntries_.emplace_back(std::move(sort), desc); + sortingEntries_.emplace_back(std::forward(sort), desc); for (const T &v : forcedSortOrder) forcedSortOrder_.emplace_back(v); return *this; } - template - Query &&Sort(std::string sort, bool desc, std::initializer_list forcedSortOrder) && { - return std::move(Sort(std::move(sort), desc, std::move(forcedSortOrder))); + template + Query &&Sort(Str &&sort, bool desc, std::initializer_list forcedSortOrder) && { + return std::move(Sort(std::forward(sort), desc, std::move(forcedSortOrder))); } /// Performs sorting by certain column. Analog to sql ORDER BY. @@ -537,28 +578,32 @@ class Query { /// @param desc - is sorting direction descending or ascending. /// @param forcedSortOrder - list of values for forced sort order. /// @return Query object. - template - Query &Sort(std::string sort, bool desc, const T &forcedSortOrder) & { + template + Query &Sort(Str &&sort, bool desc, const T &forcedSortOrder) & { if (!sortingEntries_.empty() && !forcedSortOrder.empty()) throw Error(errParams, "Forced sort order is allowed for the first sorting entry only"); - sortingEntries_.emplace_back(std::move(sort), desc); + sortingEntries_.emplace_back(std::forward(sort), desc); for (const auto &v : forcedSortOrder) forcedSortOrder_.emplace_back(v); return *this; } - template - Query &&Sort(std::string sort, bool desc, const T &forcedSortOrder) && { - return std::move(Sort(std::move(sort), desc, forcedSortOrder)); + template + Query &&Sort(Str &&sort, bool desc, const T &forcedSortOrder) && { + return std::move(Sort(std::forward(sort), desc, forcedSortOrder)); } /// Performs distinct for a certain index. /// @param indexName - name of index for distict operation. - Query &Distinct(std::string indexName) & { - if (indexName.length()) { - aggregations_.emplace_back(AggDistinct, h_vector{std::move(indexName)}); + template + Query &Distinct(Str &&indexName) & { + if (!strEmpty(indexName)) { + aggregations_.emplace_back(AggDistinct, h_vector{std::forward(indexName)}); } return *this; } - Query &&Distinct(std::string indexName) && { return std::move(Distinct(std::move(indexName))); } + template + Query &&Distinct(Str &&indexName) && { + return std::move(Distinct(std::forward(indexName))); + } /// Sets list of columns in this namespace to be finally selected. /// @param l - list of columns to be selected. @@ -636,7 +681,7 @@ class Query { /// @param limit - number of rows to get from result set. /// @return Query object. Query &Limit(unsigned limit) &noexcept { - count = limit; + count_ = limit; return *this; } Query &&Limit(unsigned limit) &&noexcept { return std::move(Limit(limit)); } @@ -646,7 +691,7 @@ class Query { /// @param offset - index of the first row to get from result set. /// @return Query object. 
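// Illustrative sketch (not from this patch): Distinct() now also accepts any
// string-like name (empty names are ignored) and records an AggDistinct
// aggregation, while Limit()/Offset() write the now-private count_/start_ members.
// Names are assumptions:
//
//   q.Distinct("genre");
//   q.Limit(50).Offset(100);   // HasLimit()/HasOffset() report true afterwards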
Query &Offset(unsigned offset) &noexcept { - start = offset; + start_ = offset; return *this; } Query &&Offset(unsigned offset) &&noexcept { return std::move(Offset(offset)); } @@ -654,7 +699,7 @@ class Query { /// Set the total count calculation mode to Accurate /// @return Query object Query &ReqTotal() &noexcept { - calcTotal = ModeAccurateTotal; + calcTotal_ = ModeAccurateTotal; return *this; } Query &&ReqTotal() &&noexcept { return std::move(ReqTotal()); } @@ -663,7 +708,7 @@ class Query { /// It will be use LRUCache for total count result /// @return Query object Query &CachedTotal() &noexcept { - calcTotal = ModeCachedTotal; + calcTotal_ = ModeCachedTotal; return *this; } Query &&CachedTotal() &&noexcept { return std::move(CachedTotal()); } @@ -696,38 +741,47 @@ class Query { void WalkNested(bool withSelf, bool withMerged, const std::function &visitor) const; - bool HasLimit() const noexcept { return count != QueryEntry::kDefaultLimit; } - bool HasOffset() const noexcept { return start != QueryEntry::kDefaultOffset; } + bool HasLimit() const noexcept { return count_ != QueryEntry::kDefaultLimit; } + bool HasOffset() const noexcept { return start_ != QueryEntry::kDefaultOffset; } bool IsWALQuery() const noexcept; const std::vector &UpdateFields() const noexcept { return updateFields_; } QueryType Type() const noexcept { return type_; } - -protected: - void deserialize(Serializer &ser, bool &hasJoinConditions); - -public: - std::string _namespace; /// Name of the namespace. - unsigned start = QueryEntry::kDefaultOffset; /// First row index from result set. - unsigned count = QueryEntry::kDefaultLimit; /// Number of rows from result set. - int debugLevel = 0; /// Debug level. - StrictMode strictMode = StrictModeNotSet; /// Strict mode. - bool explain_ = false; /// Explain query if true - CalcTotalMode calcTotal = ModeNoTotal; /// Calculation mode. - QueryType type_ = QuerySelect; /// Query type - OpType nextOp_ = OpAnd; /// Next operation constant. - SortingEntries sortingEntries_; /// Sorting data. - std::vector forcedSortOrder_; /// Keys that always go first - before any ordered values. - std::vector joinQueries_; /// List of queries for join. - std::vector mergeQueries_; /// List of merge queries. - h_vector selectFilter_; /// List of columns in a final result set. - std::vector selectFunctions_; /// List of sql functions + const std::string &NsName() const &noexcept { return namespace_; } + template + void SetNsName(T &&nsName) &noexcept { + namespace_ = std::forward(nsName); + } + unsigned Limit() const noexcept { return count_; } + unsigned Offset() const noexcept { return start_; } + CalcTotalMode CalcTotal() const noexcept { return calcTotal_; } + void CalcTotal(CalcTotalMode calcTotal) noexcept { calcTotal_ = calcTotal; } + + int debugLevel = 0; /// Debug level. + StrictMode strictMode = StrictModeNotSet; /// Strict mode. + bool explain_ = false; /// Explain query if true + QueryType type_ = QuerySelect; /// Query type + OpType nextOp_ = OpAnd; /// Next operation constant. + SortingEntries sortingEntries_; /// Sorting data. + std::vector forcedSortOrder_; /// Keys that always go first - before any ordered values. + std::vector joinQueries_; /// List of queries for join. + std::vector mergeQueries_; /// List of merge queries. + h_vector selectFilter_; /// List of columns in a final result set. 
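// Migration sketch (not from this patch): with the namespace, limit, offset and
// total-count members made private below, call sites switch to the accessors
// introduced above; the spellings on the left are the ones removed by this patch:
//
//   q._namespace = "items";            ->  q.SetNsName("items");
//   auto ns  = q._namespace;           ->  const auto &ns = q.NsName();
//   auto lim = q.count;                ->  auto lim = q.Limit();
//   auto off = q.start;                ->  auto off = q.Offset();
//   q.calcTotal = ModeAccurateTotal;   ->  q.CalcTotal(ModeAccurateTotal);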
+ std::vector selectFunctions_; /// List of sql functions QueryEntries entries; std::vector aggregations_; + auto NsName() const && = delete; + private: - std::vector updateFields_; /// List of fields (and values) for update. + void deserialize(Serializer &ser, bool &hasJoinConditions); + + std::string namespace_; /// Name of the namespace. + unsigned start_ = QueryEntry::kDefaultOffset; /// First row index from result set. + unsigned count_ = QueryEntry::kDefaultLimit; /// Number of rows from result set. + CalcTotalMode calcTotal_ = ModeNoTotal; /// Calculation mode. + std::vector updateFields_; /// List of fields (and values) for update. bool withRank_ = false; friend class SQLParser; }; diff --git a/cpp_src/core/query/queryentry.cc b/cpp_src/core/query/queryentry.cc index a7cc7f4e3..d7148dc49 100644 --- a/cpp_src/core/query/queryentry.cc +++ b/cpp_src/core/query/queryentry.cc @@ -8,7 +8,6 @@ #include "query.h" #include "tools/serializer.h" #include "tools/string_regexp_functions.h" -#include "tools/stringstools.h" namespace reindexer { @@ -21,11 +20,11 @@ std::string JoinQueryEntry::Dump(const std::vector &joinedSelectors) const { ser << '('; for (const auto &jqe : q.joinEntries_) { if (&jqe != &q.joinEntries_.front()) { - ser << ' ' << jqe.op_ << ' '; + ser << ' ' << jqe.Operation() << ' '; } else { - assertrx(jqe.op_ == OpAnd); + assertrx(jqe.Operation() == OpAnd); } - ser << q._namespace << '.' << jqe.joinIndex_ << ' ' << InvertJoinCondition(jqe.condition_) << ' ' << jqe.index_; + ser << q.NsName() << '.' << jqe.RightFieldName() << ' ' << InvertJoinCondition(jqe.Condition()) << ' ' << jqe.LeftFieldName(); } ser << ')'; return std::string{ser.Slice()}; @@ -41,30 +40,133 @@ std::string JoinQueryEntry::DumpOnCondition(const std::vector &joinedSelecto ser << js.Type() << " ON ("; for (const auto &jqe : q.joinEntries_) { if (&jqe != &q.joinEntries_.front()) { - ser << ' ' << jqe.op_ << ' '; + ser << ' ' << jqe.Operation() << ' '; } - ser << q._namespace << '.' << jqe.joinIndex_ << ' ' << InvertJoinCondition(jqe.condition_) << ' ' << jqe.index_; + ser << q.NsName() << '.' 
<< jqe.RightFieldName() << ' ' << InvertJoinCondition(jqe.Condition()) << ' ' << jqe.LeftFieldName(); } ser << ')'; return std::string{ser.Slice()}; } template std::string JoinQueryEntry::DumpOnCondition(const JoinedSelectors &) const; -bool QueryEntry::operator==(const QueryEntry &obj) const { - return condition == obj.condition && index == obj.index && idxNo == obj.idxNo && distinct == obj.distinct && - values.RelaxCompare(obj.values) == 0; +bool QueryField::operator==(const QueryField &other) const noexcept { + if (fieldName_ != other.fieldName_ || idxNo_ != other.idxNo_ || fieldsSet_ != other.fieldsSet_ || + !fieldType_.IsSame(other.fieldType_) || !selectType_.IsSame(other.selectType_) || + compositeFieldsTypes_.size() != other.compositeFieldsTypes_.size()) { + return false; + } + for (size_t i = 0, s = compositeFieldsTypes_.size(); i < s; ++i) { + if (!compositeFieldsTypes_[i].IsSame(other.compositeFieldsTypes_[i])) { + return false; + } + } + return true; +} + +void QueryField::SetField(FieldsSet &&fields) & { + assertrx_throw(fields.size() == 1); + assertrx_throw(fields[0] == IndexValueType::SetByJsonPath); + assertrx_throw(idxNo_ == IndexValueType::NotSet); + idxNo_ = IndexValueType::SetByJsonPath; + fieldsSet_ = std::move(fields); +} + +static void checkIndexData([[maybe_unused]] int idxNo, [[maybe_unused]] const FieldsSet &fields, KeyValueType fieldType, + [[maybe_unused]] const std::vector &compositeFieldsTypes) { + assertrx_throw(idxNo >= 0); + if (fieldType.Is()) { + assertrx_throw(fields.size() == compositeFieldsTypes.size()); + } else { + assertrx_throw(fields.size() == 1); + assertrx_throw(compositeFieldsTypes.empty()); + } +} + +void QueryField::SetIndexData(int idxNo, FieldsSet &&fields, KeyValueType fieldType, KeyValueType selectType, + std::vector &&compositeFieldsTypes) & { + checkIndexData(idxNo, fields, fieldType, compositeFieldsTypes); + idxNo_ = idxNo; + fieldsSet_ = std::move(fields); + fieldType_ = fieldType; + selectType_ = selectType; + compositeFieldsTypes_ = std::move(compositeFieldsTypes); +} + +bool QueryField::HaveEmptyField() const noexcept { + size_t tagsNo = 0; + for (auto f : Fields()) { + if (f == IndexValueType::SetByJsonPath) { + if (Fields().getTagsPath(tagsNo).empty()) { + return true; + } + ++tagsNo; + } + } + return Fields().empty(); +} + +bool QueryEntry::operator==(const QueryEntry &other) const noexcept { + return QueryField::operator==(other) && condition_ == other.condition_ && distinct_ == other.distinct_ && + values_.RelaxCompare(other.values_) == 0; } +template +void QueryEntry::verify(CondType cond, const VariantArray &values) { + if constexpr (flags & kIgnoreEmptyValues) { + if (values.empty()) { + return; + } + } + const auto checkArgsCount = [&](size_t argsCountReq) { + if (values.size() != argsCountReq) { + throw Error{errLogic, "Condition %s must have exact %d argument, but %d arguments was provided", CondTypeToStr(cond), + argsCountReq, values.size()}; + } + }; + switch (cond) { + case CondEq: + case CondSet: + case CondAllSet: + break; + case CondAny: + case CondEmpty: + if (!values.empty() && !(values.size() == 1 && values[0].Type().Is())) { + throw Error{errLogic, "Condition %s must have no argument or single null argument, but %d not null arguments was provided", + CondTypeToStr(cond), values.size()}; + } + break; + case CondGe: + case CondGt: + case CondLt: + case CondLe: + checkArgsCount(1); + break; + case CondLike: + checkArgsCount(1); + if (!values[0].Type().Is()) { + throw Error{errLogic, "Condition %s must have string 
argument, but %d argument was provided", CondTypeToStr(cond), + values[0].Type().Name()}; + } + break; + case CondRange: + case CondDWithin: + checkArgsCount(2); + break; + } +} +template void QueryEntry::verify<0u>(CondType, const VariantArray &); +template void QueryEntry::verify(CondType, const VariantArray &); + std::string QueryEntry::Dump() const { WrSerializer ser; - if (distinct) { - ser << "Distinct index: " << index; + if (Distinct()) { + ser << "Distinct index: " << FieldName(); } else { - ser << index << ' ' << condition << ' '; - const bool severalValues = (values.size() > 1); + ser << FieldName() << ' ' << condition_ << ' '; + const bool severalValues = (Values().size() > 1); if (severalValues) ser << '('; - for (auto &v : values) { - if (&v != &*values.begin()) ser << ','; + for (auto &v : Values()) { + if (&v != &*Values().begin()) ser << ','; ser << '\'' << v.As() << '\''; } if (severalValues) ser << ')'; @@ -75,18 +177,18 @@ std::string QueryEntry::Dump() const { std::string QueryEntry::DumpBrief() const { WrSerializer ser; { - ser << index << ' ' << condition << ' '; - const bool severalValues = (values.size() > 1); + ser << FieldName() << ' ' << Condition() << ' '; + const bool severalValues = (Values().size() > 1); if (severalValues) { ser << "(...)"; } else { - ser << '\'' << values.front().As() << '\''; + ser << '\'' << Values().front().As() << '\''; } } return std::string(ser.Slice()); } -AggregateEntry::AggregateEntry(AggType type, h_vector fields, SortingEntries sort, unsigned limit, unsigned offset) +AggregateEntry::AggregateEntry(AggType type, h_vector &&fields, SortingEntries &&sort, unsigned limit, unsigned offset) : type_(type), fields_(std::move(fields)), sortingEntries_{std::move(sort)}, limit_(limit), offset_(offset) { switch (type_) { case AggFacet: @@ -134,7 +236,7 @@ AggregateEntry::AggregateEntry(AggType type, h_vector fields, So } } -void AggregateEntry::AddSortingEntry(SortingEntry sorting) { +void AggregateEntry::AddSortingEntry(SortingEntry &&sorting) { if (type_ != AggFacet) { throw Error(errQueryExec, "Sort is not available for aggregation %s", AggTypeToStr(type_)); } @@ -155,21 +257,13 @@ void AggregateEntry::SetOffset(unsigned o) { offset_ = o; } -BetweenFieldsQueryEntry::BetweenFieldsQueryEntry(std::string fstIdx, CondType cond, std::string sndIdx) - : firstIndex{std::move(fstIdx)}, secondIndex{std::move(sndIdx)}, condition_{cond} { - if (condition_ == CondAny || condition_ == CondEmpty || condition_ == CondDWithin) { - throw Error{errLogic, "Condition '%s' is inapplicable between two fields", std::string{CondTypeToStr(condition_)}}; - } -} - bool BetweenFieldsQueryEntry::operator==(const BetweenFieldsQueryEntry &other) const noexcept { - return firstIdxNo == other.firstIdxNo && secondIdxNo == other.secondIdxNo && Condition() == other.Condition() && - firstIndex == other.firstIndex && secondIndex == other.secondIndex; + return leftField_ == other.leftField_ && rightField_ == other.rightField_ && Condition() == other.Condition(); } std::string BetweenFieldsQueryEntry::Dump() const { WrSerializer ser; - ser << firstIndex << ' ' << Condition() << ' ' << secondIndex; + ser << LeftFieldName() << ' ' << Condition() << ' ' << RightFieldName(); return std::string{ser.Slice()}; } @@ -184,30 +278,31 @@ void QueryEntries::serialize(const_iterator it, const_iterator to, WrSerializer ser.PutVarUint(QueryCloseBracket); }, [&ser, op](const QueryEntry &entry) { - entry.distinct ? 
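// Summary (not from this patch) of the argument counts enforced by verify() above;
// the kIgnoreEmptyValues variant skips all checks when the value list is empty:
//
//   CondEq / CondSet / CondAllSet      : any number of values
//   CondAny / CondEmpty                : no values, or a single null value
//   CondGe / CondGt / CondLe / CondLt  : exactly one value
//   CondLike                           : exactly one value, and it must be a string
//   CondRange / CondDWithin            : exactly two values
//
// Violations are reported as errLogic errors by the QueryEntry constructors and by
// SetCondAndValues(), which call verify().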
ser.PutVarUint(QueryDistinct) : ser.PutVarUint(QueryCondition); - ser.PutVString(entry.index); - if (entry.distinct) return; + entry.Distinct() ? ser.PutVarUint(QueryDistinct) : ser.PutVarUint(QueryCondition); + ser.PutVString(entry.FieldName()); + if (entry.Distinct()) return; ser.PutVarUint(op); - ser.PutVarUint(entry.condition); - if (entry.condition == CondDWithin) { - if (entry.values.size() != 2) { - throw Error(errLogic, "Condition DWithin must have exact 2 value, but %d values was provided", entry.values.size()); + ser.PutVarUint(entry.Condition()); + if (entry.Condition() == CondDWithin) { + if (entry.Values().size() != 2) { + throw Error(errLogic, "Condition DWithin must have exact 2 value, but %d values was provided", + entry.Values().size()); } ser.PutVarUint(3); - if (entry.values[0].Type().Is()) { - const Point point = static_cast(entry.values[0]); + if (entry.Values()[0].Type().Is()) { + const Point point = static_cast(entry.Values()[0]); ser.PutDouble(point.X()); ser.PutDouble(point.Y()); - ser.PutVariant(entry.values[1]); + ser.PutVariant(entry.Values()[1]); } else { - const Point point = static_cast(entry.values[1]); + const Point point = static_cast(entry.Values()[1]); ser.PutDouble(point.X()); ser.PutDouble(point.Y()); - ser.PutVariant(entry.values[0]); + ser.PutVariant(entry.Values()[0]); } } else { - ser.PutVarUint(entry.values.size()); - for (auto &kv : entry.values) ser.PutVariant(kv); + ser.PutVarUint(entry.Values().size()); + for (auto &kv : entry.Values()) ser.PutVariant(kv); } }, [&ser, op](const JoinQueryEntry &jqe) { @@ -218,9 +313,9 @@ void QueryEntries::serialize(const_iterator it, const_iterator to, WrSerializer [&ser, op](const BetweenFieldsQueryEntry &entry) { ser.PutVarUint(QueryBetweenFieldsCondition); ser.PutVarUint(op); - ser.PutVString(entry.firstIndex); + ser.PutVString(entry.LeftFieldName()); ser.PutVarUint(entry.Condition()); - ser.PutVString(entry.secondIndex); + ser.PutVString(entry.RightFieldName()); }, [&ser, op](const AlwaysFalse &) { ser.PutVarUint(QueryAlwaysFalseCondition); @@ -233,13 +328,8 @@ bool UpdateEntry::operator==(const UpdateEntry &obj) const noexcept { return isExpression_ == obj.isExpression_ && column_ == obj.column_ && mode_ == obj.mode_ && values_ == obj.values_; } -bool QueryJoinEntry::operator==(const QueryJoinEntry &obj) const noexcept { - if (op_ != obj.op_) return false; - if (static_cast(condition_) != obj.condition_) return false; - if (index_ != obj.index_) return false; - if (joinIndex_ != obj.joinIndex_) return false; - if (idxNo != obj.idxNo) return false; - return true; +bool QueryJoinEntry::operator==(const QueryJoinEntry &other) const noexcept { + return op_ == other.op_ && condition_ == other.condition_ && leftField_ == other.leftField_ && rightField_ == other.rightField_; } bool AggregateEntry::operator==(const AggregateEntry &obj) const noexcept { @@ -254,7 +344,7 @@ bool SortingEntry::operator==(const SortingEntry &obj) const noexcept { return true; } -bool QueryEntries::checkIfSatisfyConditions(const_iterator begin, const_iterator end, const ConstPayload &pl, TagsMatcher &tagsMatcher) { +bool QueryEntries::checkIfSatisfyConditions(const_iterator begin, const_iterator end, const ConstPayload &pl) { assertrx(begin != end && begin->operation != OpOr); bool result = true; for (auto it = begin; it != end; ++it) { @@ -264,40 +354,26 @@ bool QueryEntries::checkIfSatisfyConditions(const_iterator begin, const_iterator break; } const bool lastResult = it->InvokeAppropriate( - [&it, &pl, &tagsMatcher](const 
QueryEntriesBracket &) { - return checkIfSatisfyConditions(it.cbegin(), it.cend(), pl, tagsMatcher); - }, - [&pl, &tagsMatcher](const QueryEntry &qe) { return checkIfSatisfyCondition(qe, pl, tagsMatcher); }, - [&pl, &tagsMatcher](const BetweenFieldsQueryEntry &qe) { return checkIfSatisfyCondition(qe, pl, tagsMatcher); }, + [&it, &pl](const QueryEntriesBracket &) { return checkIfSatisfyConditions(it.cbegin(), it.cend(), pl); }, + [&pl](const QueryEntry &qe) { return checkIfSatisfyCondition(qe, pl); }, + [&pl](const BetweenFieldsQueryEntry &qe) { return checkIfSatisfyCondition(qe, pl); }, [](const JoinQueryEntry &) -> bool { abort(); }, [](const AlwaysFalse &) { return false; }); result = (lastResult != (it->operation == OpNot)); } return result; } -bool QueryEntries::checkIfSatisfyCondition(const QueryEntry &qEntry, const ConstPayload &pl, TagsMatcher &tagsMatcher) { +bool QueryEntries::checkIfSatisfyCondition(const QueryEntry &qEntry, const ConstPayload &pl) { VariantArray values; - if (qEntry.idxNo == IndexValueType::SetByJsonPath) { - pl.GetByJsonPath(qEntry.index, tagsMatcher, values, KeyValueType::Undefined{}); - } else { - pl.Get(qEntry.idxNo, values); - } - return checkIfSatisfyCondition(values, qEntry.condition, qEntry.values); + pl.GetByFieldsSet(qEntry.Fields(), values, qEntry.FieldType(), qEntry.CompositeFieldsTypes()); + return checkIfSatisfyCondition(values, qEntry.Condition(), qEntry.Values()); } -bool QueryEntries::checkIfSatisfyCondition(const BetweenFieldsQueryEntry &qEntry, const ConstPayload &pl, TagsMatcher &tagsMatcher) { +bool QueryEntries::checkIfSatisfyCondition(const BetweenFieldsQueryEntry &qEntry, const ConstPayload &pl) { VariantArray lValues; - if (qEntry.firstIdxNo == IndexValueType::SetByJsonPath) { - pl.GetByJsonPath(qEntry.firstIndex, tagsMatcher, lValues, KeyValueType::Undefined{}); - } else { - pl.Get(qEntry.firstIdxNo, lValues); - } + pl.GetByFieldsSet(qEntry.LeftFields(), lValues, qEntry.LeftFieldType(), qEntry.LeftCompositeFieldsTypes()); VariantArray rValues; - if (qEntry.secondIdxNo == IndexValueType::SetByJsonPath) { - pl.GetByJsonPath(qEntry.secondIndex, tagsMatcher, rValues, KeyValueType::Undefined{}); - } else { - pl.Get(qEntry.secondIdxNo, rValues); - } + pl.GetByFieldsSet(qEntry.RightFields(), rValues, qEntry.RightFieldType(), qEntry.RightCompositeFieldsTypes()); return checkIfSatisfyCondition(lValues, qEntry.Condition(), rValues); } @@ -350,18 +426,11 @@ bool QueryEntries::checkIfSatisfyCondition(const VariantArray &lValues, CondType return false; } case CondType::CondRange: - if (rValues.size() != 2) throw Error(errParams, "For ranged query reuqired 2 arguments, but provided %d", rValues.size()); for (const auto &v : lValues) { if (v.RelaxCompare(rValues[0]) < 0 || v.RelaxCompare(rValues[1]) > 0) return false; } return true; case CondType::CondLike: - if (rValues.size() != 1) { - throw Error(errLogic, "Condition LIKE must have exact 1 value, but %d values was provided", rValues.size()); - } - if (!rValues[0].Type().Is()) { - throw Error(errLogic, "Condition LIKE must have value of string type, but %s value was provided", rValues[0].Type().Name()); - } for (const auto &v : lValues) { if (!v.Type().Is()) { throw Error(errLogic, "Condition LIKE must be applied to data of string type, but %s was provided", v.Type().Name()); @@ -370,9 +439,6 @@ bool QueryEntries::checkIfSatisfyCondition(const VariantArray &lValues, CondType } return false; case CondType::CondDWithin: { - if (rValues.size() != 2) { - throw Error(errLogic, "Condition DWithin must 
have exact 2 value, but %d values was provided", rValues.size()); - } Point point; double distance; if (rValues[0].Type().Is()) { @@ -390,4 +456,56 @@ bool QueryEntries::checkIfSatisfyCondition(const VariantArray &lValues, CondType return false; } +template +std::string QueryJoinEntry::DumpCondition(const JS &joinedSelector, bool needOp) const { + WrSerializer ser; + const auto &q = joinedSelector.JoinQuery(); + if (needOp) { + ser << ' ' << op_ << ' '; + } + ser << q.NsName() << '.' << RightFieldName() << ' ' << InvertJoinCondition(condition_) << ' ' << LeftFieldName(); + return std::string{ser.Slice()}; +} +template std::string QueryJoinEntry::DumpCondition(const JoinedSelector &, bool) const; + +void QueryEntries::dumpEqualPositions(size_t level, WrSerializer &ser, const EqualPositions_t &equalPositions) { + for (const auto &eq : equalPositions) { + for (size_t i = 0; i < level; ++i) { + ser << " "; + } + ser << "equal_poisition("; + for (size_t i = 0, s = eq.size(); i < s; ++i) { + if (i != 0) ser << ", "; + ser << eq[i]; + } + ser << ")\n"; + } +} + +template +void QueryEntries::dump(size_t level, const_iterator begin, const_iterator end, const std::vector &joinedSelectors, WrSerializer &ser) { + for (const_iterator it = begin; it != end; ++it) { + for (size_t i = 0; i < level; ++i) { + ser << " "; + } + if (it != begin || it->operation != OpAnd) { + ser << it->operation << ' '; + } + it->InvokeAppropriate( + [&](const QueryEntriesBracket &b) { + ser << "(\n"; + dump(level + 1, it.cbegin(), it.cend(), joinedSelectors, ser); + dumpEqualPositions(level + 1, ser, b.equalPositions); + for (size_t i = 0; i < level; ++i) { + ser << " "; + } + ser << ")\n"; + }, + [&ser](const QueryEntry &qe) { ser << qe.Dump() << '\n'; }, + [&joinedSelectors, &ser](const JoinQueryEntry &jqe) { ser << jqe.Dump(joinedSelectors) << '\n'; }, + [&ser](const BetweenFieldsQueryEntry &qe) { ser << qe.Dump() << '\n'; }, + [&ser](const AlwaysFalse &) { ser << "AlwaysFalse" << 'n'; }); + } +} + } // namespace reindexer diff --git a/cpp_src/core/query/queryentry.h b/cpp_src/core/query/queryentry.h index d2b53ebf8..6d00faf35 100644 --- a/cpp_src/core/query/queryentry.h +++ b/cpp_src/core/query/queryentry.h @@ -5,10 +5,11 @@ #include #include "core/expressiontree.h" #include "core/keyvalue/variant.h" +#include "core/payload/fieldsset.h" #include "core/type_consts.h" -#include "core/type_consts_helpers.h" #include "estl/h_vector.h" #include "tools/serializer.h" +#include "tools/verifying_updater.h" namespace reindexer { @@ -31,44 +32,181 @@ struct JoinQueryEntry { std::string DumpOnCondition(const std::vector &joinedSelectors) const; }; -struct QueryEntry { +class QueryField { +public: + template + explicit QueryField(Str &&fieldName) noexcept : fieldName_{std::forward(fieldName)} {} + QueryField(std::string &&fieldName, int idxNo, FieldsSet fields, KeyValueType fieldType, + std::vector &&compositeFieldsTypes); + QueryField(QueryField &&) noexcept = default; + QueryField(const QueryField &) = default; + QueryField &operator=(QueryField &&) noexcept = default; + + [[nodiscard]] bool operator==(const QueryField &) const noexcept; + [[nodiscard]] bool operator!=(const QueryField &other) const noexcept { return !operator==(other); } + + [[nodiscard]] int IndexNo() const noexcept { return idxNo_; } + [[nodiscard]] bool IsFieldIndexed() const noexcept { return idxNo_ >= 0; } + [[nodiscard]] bool FieldsHaveBeenSet() const noexcept { return idxNo_ != IndexValueType::NotSet; } + [[nodiscard]] const FieldsSet &Fields() const 
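// Illustrative sketch (not from this patch): a QueryField starts as a bare field
// name and gets its index data bound later, either as a non-indexed json-path
// field or as a real index. Variable names below are assumptions:
//
//   QueryField f{"price"};                  // idxNo_ == IndexValueType::NotSet
//   f.SetField(std::move(jsonPathFields));  // non-indexed: single SetByJsonPath entry
//   // ...or, for an indexed field:
//   // f.SetIndexData(idxNo, std::move(fields), fieldType, selectType, std::move(compositeTypes));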
&noexcept { return fieldsSet_; } + [[nodiscard]] const std::string &FieldName() const &noexcept { return fieldName_; } + [[nodiscard]] KeyValueType FieldType() const noexcept { return fieldType_; } + [[nodiscard]] KeyValueType SelectType() const noexcept { return selectType_; } + [[nodiscard]] const std::vector &CompositeFieldsTypes() const &noexcept { return compositeFieldsTypes_; } + [[nodiscard]] bool HaveEmptyField() const noexcept; + void SetField(FieldsSet &&fields) &; + void SetIndexData(int idxNo, FieldsSet &&fields, KeyValueType fieldType, KeyValueType selectType, + std::vector &&compositeFieldsTypes) &; + + QueryField &operator=(const QueryField &) = delete; + auto Fields() const && = delete; + auto FieldName() const && = delete; + auto CompositeFieldsTypes() const && = delete; + +private: + std::string fieldName_; + int idxNo_{IndexValueType::NotSet}; + FieldsSet fieldsSet_; + KeyValueType fieldType_{KeyValueType::Undefined{}}; + KeyValueType selectType_{KeyValueType::Undefined{}}; + std::vector compositeFieldsTypes_; +}; + +class QueryEntry : private QueryField { + static constexpr unsigned kIgnoreEmptyValues = 1u; + +public: + struct DistinctTag {}; + struct IgnoreEmptyValues {}; static constexpr unsigned kDefaultLimit = UINT_MAX; static constexpr unsigned kDefaultOffset = 0; - QueryEntry(std::string idx, CondType cond, VariantArray v) : index{std::move(idx)}, condition{cond}, values(std::move(v)) {} - QueryEntry(CondType cond, std::string idx, int idxN, bool dist = false) - : index(std::move(idx)), idxNo(idxN), condition(cond), distinct(dist) {} - QueryEntry() = default; + template + QueryEntry(Str &&fieldName, CondType cond, VariantArray &&v) + : QueryField{std::forward(fieldName)}, values_{std::move(v)}, condition_{cond} { + Verify(); + } + template + QueryEntry(Str &&fieldName, DistinctTag) : QueryField{std::forward(fieldName)}, condition_{CondAny}, distinct_{true} { + Verify(); + } + QueryEntry(QueryField &&field, CondType cond, VariantArray &&v) + : QueryField{std::move(field)}, values_{std::move(v)}, condition_{cond} { + Verify(); + } + QueryEntry(QueryField &&field, CondType cond, IgnoreEmptyValues) : QueryField{std::move(field)}, condition_{cond} { + verifyIgnoringEmptyValues(); + } + [[nodiscard]] CondType Condition() const noexcept { return condition_; } + [[nodiscard]] const VariantArray &Values() const &noexcept { return values_; } + [[nodiscard]] VariantArray &&Values() &&noexcept { return std::move(values_); } + [[nodiscard]] auto UpdatableValues(IgnoreEmptyValues) &noexcept { + return VerifyingUpdater{*this}; + } + [[nodiscard]] bool Distinct() const noexcept { return distinct_; } + void Distinct(bool d) noexcept { distinct_ = d; } + using QueryField::IndexNo; + using QueryField::IsFieldIndexed; + using QueryField::FieldsHaveBeenSet; + using QueryField::Fields; + using QueryField::FieldName; + using QueryField::FieldType; + using QueryField::SelectType; + using QueryField::CompositeFieldsTypes; + using QueryField::SetField; + using QueryField::SetIndexData; + using QueryField::HaveEmptyField; + void SetCondAndValues(CondType cond, VariantArray &&values) { + verify(cond, values); + condition_ = cond; + values_ = std::move(values); + } - bool operator==(const QueryEntry &) const; - bool operator!=(const QueryEntry &other) const { return !operator==(other); } + const QueryField &FieldData() const &noexcept { return static_cast(*this); } + QueryField &FieldData() &noexcept { return static_cast(*this); } + void ConvertValuesToFieldType() & { + for (Variant &v : 
values_) { + v.convert(SelectType()); + } + } + void ConvertValuesToFieldType(const PayloadType &pt) & { + if (SelectType().Is() || Condition() == CondDWithin) { + return; + } + for (Variant &v : values_) { + v.convert(SelectType(), &pt, &Fields()); + } + } + void Verify() const { verify(condition_, values_); } - std::string index; - int idxNo = IndexValueType::NotSet; - CondType condition = CondType::CondAny; - bool distinct = false; - VariantArray values; + [[nodiscard]] bool operator==(const QueryEntry &) const noexcept; + [[nodiscard]] bool operator!=(const QueryEntry &other) const noexcept { return !operator==(other); } - std::string Dump() const; - std::string DumpBrief() const; + [[nodiscard]] std::string Dump() const; + [[nodiscard]] std::string DumpBrief() const; + + auto Values() const && = delete; + auto FieldData() const && = delete; + +private: + template + static void verify(CondType, const VariantArray &); + void verifyIgnoringEmptyValues() const { verify(condition_, values_); } + + VariantArray values_; + CondType condition_; + bool distinct_{false}; }; +extern template void QueryEntry::verify<0u>(CondType, const VariantArray &); +extern template void QueryEntry::verify(CondType, const VariantArray &); class BetweenFieldsQueryEntry { public: - BetweenFieldsQueryEntry(std::string fstIdx, CondType cond, std::string sndIdx); - - bool operator==(const BetweenFieldsQueryEntry &) const noexcept; - bool operator!=(const BetweenFieldsQueryEntry &other) const noexcept { return !operator==(other); } - - std::string firstIndex; - std::string secondIndex; - int firstIdxNo = IndexValueType::NotSet; - int secondIdxNo = IndexValueType::NotSet; + template + BetweenFieldsQueryEntry(StrL &&fstIdx, CondType cond, StrR &&sndIdx) + : leftField_{std::forward(fstIdx)}, rightField_{std::forward(sndIdx)}, condition_{cond} { + if (condition_ == CondAny || condition_ == CondEmpty || condition_ == CondDWithin) { + throw Error{errLogic, "Condition '%s' is inapplicable between two fields", std::string{CondTypeToStr(condition_)}}; + } + } - CondType Condition() const noexcept { return condition_; } - std::string Dump() const; + [[nodiscard]] bool operator==(const BetweenFieldsQueryEntry &) const noexcept; + [[nodiscard]] bool operator!=(const BetweenFieldsQueryEntry &other) const noexcept { return !operator==(other); } + + [[nodiscard]] CondType Condition() const noexcept { return condition_; } + [[nodiscard]] int LeftIdxNo() const noexcept { return leftField_.IndexNo(); } + [[nodiscard]] int RightIdxNo() const noexcept { return rightField_.IndexNo(); } + [[nodiscard]] const std::string &LeftFieldName() const &noexcept { return leftField_.FieldName(); } + [[nodiscard]] const std::string &RightFieldName() const &noexcept { return rightField_.FieldName(); } + [[nodiscard]] const FieldsSet &LeftFields() const &noexcept { return leftField_.Fields(); } + [[nodiscard]] const FieldsSet &RightFields() const &noexcept { return rightField_.Fields(); } + [[nodiscard]] KeyValueType LeftFieldType() const noexcept { return leftField_.FieldType(); } + [[nodiscard]] KeyValueType RightFieldType() const noexcept { return rightField_.FieldType(); } + [[nodiscard]] const std::vector &LeftCompositeFieldsTypes() const &noexcept { return leftField_.CompositeFieldsTypes(); } + [[nodiscard]] const std::vector &RightCompositeFieldsTypes() const &noexcept { + return rightField_.CompositeFieldsTypes(); + } + [[nodiscard]] const QueryField &LeftFieldData() const &noexcept { return leftField_; } + [[nodiscard]] QueryField 
&LeftFieldData() &noexcept { return leftField_; } + [[nodiscard]] const QueryField &RightFieldData() const &noexcept { return rightField_; } + [[nodiscard]] QueryField &RightFieldData() &noexcept { return rightField_; } + [[nodiscard]] bool FieldsHaveBeenSet() const noexcept { return leftField_.FieldsHaveBeenSet() && rightField_.FieldsHaveBeenSet(); } + [[nodiscard]] bool IsLeftFieldIndexed() const noexcept { return leftField_.IsFieldIndexed(); } + [[nodiscard]] bool IsRightFieldIndexed() const noexcept { return rightField_.IsFieldIndexed(); } + [[nodiscard]] std::string Dump() const; + + auto LeftFieldName() const && = delete; + auto RightFieldName() const && = delete; + auto LeftFields() const && = delete; + auto RightFields() const && = delete; + auto LeftCompositeFieldsTypes() const && = delete; + auto RightCompositeFieldsTypes() const && = delete; + auto LeftFieldData() const && = delete; + auto RightFieldData() const && = delete; private: + QueryField leftField_; + QueryField rightField_; CondType condition_; }; @@ -103,9 +241,7 @@ class QueryEntries void ToDsl(const Query &parentQuery, JsonBuilder &builder) const { return toDsl(cbegin(), cend(), parentQuery, builder); } void WriteSQLWhere(const Query &parentQuery, WrSerializer &, bool stripArgs) const; void Serialize(WrSerializer &ser) const { serialize(cbegin(), cend(), ser); } - bool CheckIfSatisfyConditions(const ConstPayload &pl, TagsMatcher &tm) const { - return checkIfSatisfyConditions(cbegin(), cend(), pl, tm); - } + bool CheckIfSatisfyConditions(const ConstPayload &pl) const { return checkIfSatisfyConditions(cbegin(), cend(), pl); } template std::string Dump(const std::vector &joinedSelectors) const { WrSerializer ser; @@ -120,57 +256,22 @@ class QueryEntries static void toDsl(const_iterator it, const_iterator to, const Query &parentQuery, JsonBuilder &); static void writeSQL(const Query &parentQuery, const_iterator from, const_iterator to, WrSerializer &, bool stripArgs); static void serialize(const_iterator it, const_iterator to, WrSerializer &); - static bool checkIfSatisfyConditions(const_iterator begin, const_iterator end, const ConstPayload &, TagsMatcher &); - static bool checkIfSatisfyCondition(const QueryEntry &, const ConstPayload &, TagsMatcher &); - static bool checkIfSatisfyCondition(const BetweenFieldsQueryEntry &, const ConstPayload &, TagsMatcher &); + static bool checkIfSatisfyConditions(const_iterator begin, const_iterator end, const ConstPayload &); + static bool checkIfSatisfyCondition(const QueryEntry &, const ConstPayload &); + static bool checkIfSatisfyCondition(const BetweenFieldsQueryEntry &, const ConstPayload &); static bool checkIfSatisfyCondition(const VariantArray &lValues, CondType, const VariantArray &rValues); protected: - static void dumpEqualPositions(size_t level, WrSerializer &ser, const EqualPositions_t &equalPositions) { - for (const auto &eq : equalPositions) { - for (size_t i = 0; i < level; ++i) { - ser << " "; - } - ser << "equal_poisition("; - for (size_t i = 0, s = eq.size(); i < s; ++i) { - if (i != 0) ser << ", "; - ser << eq[i]; - } - ser << ")\n"; - } - } - + static void dumpEqualPositions(size_t level, WrSerializer &, const EqualPositions_t &); template - static void dump(size_t level, const_iterator begin, const_iterator end, const std::vector &joinedSelectors, WrSerializer &ser) { - for (const_iterator it = begin; it != end; ++it) { - for (size_t i = 0; i < level; ++i) { - ser << " "; - } - if (it != begin || it->operation != OpAnd) { - ser << it->operation << ' '; - } - 
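// Illustrative sketch (not from this patch): with the TagsMatcher parameter dropped
// from checkIfSatisfyCondition*, entry evaluation only needs the payload, because
// the json-path vs. index choice is already captured in each entry's FieldsSet
// (via SetField()/SetIndexData()). Assuming a query whose fields have been bound:
//
//   bool matches = query.entries.CheckIfSatisfyConditions(constPayload);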
it->InvokeAppropriate( - [&](const QueryEntriesBracket &b) { - ser << "(\n"; - dump(level + 1, it.cbegin(), it.cend(), joinedSelectors, ser); - dumpEqualPositions(level + 1, ser, b.equalPositions); - for (size_t i = 0; i < level; ++i) { - ser << " "; - } - ser << ")\n"; - }, - [&ser](const QueryEntry &qe) { ser << qe.Dump() << '\n'; }, - [&joinedSelectors, &ser](const JoinQueryEntry &jqe) { ser << jqe.Dump(joinedSelectors) << '\n'; }, - [&ser](const BetweenFieldsQueryEntry &qe) { ser << qe.Dump() << '\n'; }, - [&ser](const AlwaysFalse &) { ser << "AlwaysFalse" << 'n'; }); - } - } + static void dump(size_t level, const_iterator begin, const_iterator end, const std::vector &joinedSelectors, WrSerializer &); }; class UpdateEntry { public: - UpdateEntry(std::string c, VariantArray v, FieldModifyMode m = FieldModeSet, bool e = false) - : column_(std::move(c)), values_(std::move(v)), mode_(m), isExpression_(e) { + template + UpdateEntry(Str &&c, VariantArray &&v, FieldModifyMode m = FieldModeSet, bool e = false) + : column_(std::forward(c)), values_(std::move(v)), mode_(m), isExpression_(e) { if (column_.empty()) { throw Error{errParams, "Empty update column name"}; } @@ -192,36 +293,71 @@ class UpdateEntry { bool isExpression_ = false; }; -struct QueryJoinEntry { - QueryJoinEntry() = default; - QueryJoinEntry(OpType op, CondType cond, std::string idx, std::string jIdx) - : op_{op}, condition_{cond}, index_{std::move(idx)}, joinIndex_{std::move(jIdx)} {} - bool operator==(const QueryJoinEntry &) const noexcept; - bool operator!=(const QueryJoinEntry &qje) const noexcept { return !operator==(qje); } - OpType op_ = OpAnd; - CondType condition_ = CondEq; ///< Condition applied to expression: index_ COND joinIndex_ - std::string index_; ///< main ns index field name - std::string joinIndex_; ///< joining ns index field name - int idxNo = -1; ///< index_ field Index number in main ns - bool reverseNamespacesOrder = false; ///< controls SQL encoding order - ///< false: mainNs.index Condition joinNs.joinIndex - ///< true: joinNs.joinIndex Invert(Condition) mainNs.index +class QueryJoinEntry { +public: + QueryJoinEntry(OpType op, CondType cond, std::string &&leftFld, std::string &&rightFld, bool reverseNs = false) noexcept + : leftField_{std::move(leftFld)}, rightField_{std::move(rightFld)}, op_{op}, condition_{cond}, reverseNamespacesOrder_{reverseNs} {} + [[nodiscard]] bool operator==(const QueryJoinEntry &) const noexcept; + [[nodiscard]] bool operator!=(const QueryJoinEntry &other) const noexcept { return !operator==(other); } + [[nodiscard]] bool IsLeftFieldIndexed() const noexcept { return leftField_.IsFieldIndexed(); } + [[nodiscard]] bool IsRightFieldIndexed() const noexcept { return rightField_.IsFieldIndexed(); } + [[nodiscard]] int LeftIdxNo() const noexcept { return leftField_.IndexNo(); } + [[nodiscard]] int RightIdxNo() const noexcept { return rightField_.IndexNo(); } + [[nodiscard]] const FieldsSet &LeftFields() const &noexcept { return leftField_.Fields(); } + [[nodiscard]] const FieldsSet &RightFields() const &noexcept { return rightField_.Fields(); } + [[nodiscard]] KeyValueType LeftFieldType() const noexcept { return leftField_.FieldType(); } + [[nodiscard]] KeyValueType RightFieldType() const noexcept { return rightField_.FieldType(); } + [[nodiscard]] const std::vector &LeftCompositeFieldsTypes() const &noexcept { return leftField_.CompositeFieldsTypes(); } + [[nodiscard]] const std::vector &RightCompositeFieldsTypes() const &noexcept { + return rightField_.CompositeFieldsTypes(); + 
} + [[nodiscard]] OpType Operation() const noexcept { return op_; } + [[nodiscard]] CondType Condition() const noexcept { return condition_; } + [[nodiscard]] const std::string &LeftFieldName() const &noexcept { return leftField_.FieldName(); } + [[nodiscard]] const std::string &RightFieldName() const &noexcept { return rightField_.FieldName(); } + [[nodiscard]] bool ReverseNamespacesOrder() const noexcept { return reverseNamespacesOrder_; } + [[nodiscard]] const QueryField &LeftFieldData() const &noexcept { return leftField_; } + [[nodiscard]] QueryField &LeftFieldData() &noexcept { return leftField_; } + [[nodiscard]] const QueryField &RightFieldData() const &noexcept { return rightField_; } + [[nodiscard]] QueryField &RightFieldData() &noexcept { return rightField_; } + void SetLeftIndexData(int idxNo, FieldsSet &&fields, KeyValueType fieldType, KeyValueType selectType, + std::vector &&compositeFieldsTypes) & { + leftField_.SetIndexData(idxNo, std::move(fields), fieldType, selectType, std::move(compositeFieldsTypes)); + } + void SetRightIndexData(int idxNo, FieldsSet &&fields, KeyValueType fieldType, KeyValueType selectType, + std::vector &&compositeFieldsTypes) & { + rightField_.SetIndexData(idxNo, std::move(fields), fieldType, selectType, std::move(compositeFieldsTypes)); + } + void SetLeftField(FieldsSet &&fields) & { leftField_.SetField(std::move(fields)); } + void SetRightField(FieldsSet &&fields) & { rightField_.SetField(std::move(fields)); } + [[nodiscard]] bool FieldsHaveBeenSet() const noexcept { return leftField_.FieldsHaveBeenSet() && rightField_.FieldsHaveBeenSet(); } template - std::string DumpCondition(const JS &joinedSelector, bool needOp = false) const { - WrSerializer ser; - const auto &q = joinedSelector.JoinQuery(); - if (needOp) { - ser << ' ' << op_ << ' '; - } - ser << q._namespace << '.' 
<< joinIndex_ << ' ' << InvertJoinCondition(condition_) << ' ' << index_; - return std::string{ser.Slice()}; - } + std::string DumpCondition(const JS &joinedSelector, bool needOp = false) const; + + auto LeftFields() const && = delete; + auto RightFields() const && = delete; + auto LeftCompositeFieldsTypes() const && = delete; + auto RightCompositeFieldsTypes() const && = delete; + auto LeftFieldName() const && = delete; + auto RightFieldName() const && = delete; + auto LeftFieldData() const && = delete; + auto RightFieldData() const && = delete; + +private: + QueryField leftField_; + QueryField rightField_; + const OpType op_; + const CondType condition_; + const bool reverseNamespacesOrder_; ///< controls SQL encoding order + ///< false: mainNs.index Condition joinNs.joinIndex + ///< true: joinNs.joinIndex Invert(Condition) mainNs.index }; struct SortingEntry { SortingEntry() noexcept = default; - SortingEntry(std::string e, bool d) noexcept : expression(std::move(e)), desc(d) {} + template + SortingEntry(Str &&e, bool d) noexcept : expression(std::forward(e)), desc(d) {} bool operator==(const SortingEntry &) const noexcept; bool operator!=(const SortingEntry &se) const noexcept { return !operator==(se); } std::string expression; @@ -233,7 +369,7 @@ struct SortingEntries : public h_vector {}; class AggregateEntry { public: - AggregateEntry(AggType type, h_vector fields, SortingEntries sort = {}, unsigned limit = QueryEntry::kDefaultLimit, + AggregateEntry(AggType type, h_vector &&fields, SortingEntries &&sort = {}, unsigned limit = QueryEntry::kDefaultLimit, unsigned offset = QueryEntry::kDefaultOffset); [[nodiscard]] bool operator==(const AggregateEntry &) const noexcept; [[nodiscard]] bool operator!=(const AggregateEntry &ae) const noexcept { return !operator==(ae); } @@ -242,7 +378,7 @@ class AggregateEntry { [[nodiscard]] const SortingEntries &Sorting() const noexcept { return sortingEntries_; } [[nodiscard]] unsigned Limit() const noexcept { return limit_; } [[nodiscard]] unsigned Offset() const noexcept { return offset_; } - void AddSortingEntry(SortingEntry); + void AddSortingEntry(SortingEntry &&); void SetLimit(unsigned); void SetOffset(unsigned); diff --git a/cpp_src/core/query/sql/sqlencoder.cc b/cpp_src/core/query/sql/sqlencoder.cc index aa82c1110..020ffea20 100644 --- a/cpp_src/core/query/sql/sqlencoder.cc +++ b/cpp_src/core/query/sql/sqlencoder.cc @@ -1,7 +1,7 @@ #include "core/query/sql/sqlencoder.h" #include "core/keyvalue/geometry.h" -#include "core/nsselecter/sortexpression.h" +#include "core/keyvalue/p_string.h" #include "core/queryresults/aggregationresult.h" #include "core/type_consts_helpers.h" #include "tools/serializer.h" @@ -16,7 +16,7 @@ static void indexToSql(const std::string &index, WrSerializer &ser) { } } -static WrSerializer &stringToSql(const std::string &str, WrSerializer &ser) { +static WrSerializer &stringToSql(std::string_view str, WrSerializer &ser) { ser << '\''; for (auto c : str) { switch (c) { @@ -48,15 +48,12 @@ static WrSerializer &stringToSql(const std::string &str, WrSerializer &ser) { ser << '\''; return ser; } - -SQLEncoder::SQLEncoder(const Query &q) : query_(q) {} - void SQLEncoder::DumpSingleJoinQuery(size_t idx, WrSerializer &ser, bool stripArgs) const { assertrx(idx < query_.joinQueries_.size()); const auto &jq = query_.joinQueries_[idx]; ser << ' ' << jq.joinType; - if (jq.entries.Empty() && jq.count == QueryEntry::kDefaultLimit && jq.sortingEntries_.empty()) { - ser << ' ' << jq._namespace << " ON "; + if (jq.entries.Empty() && 
!jq.HasLimit() && jq.sortingEntries_.empty()) { + ser << ' ' << jq.NsName() << " ON "; } else { ser << " ("; jq.GetSQL(ser, stripArgs); @@ -65,13 +62,13 @@ void SQLEncoder::DumpSingleJoinQuery(size_t idx, WrSerializer &ser, bool stripAr if (jq.joinEntries_.size() != 1) ser << "("; for (auto &e : jq.joinEntries_) { if (&e != &*jq.joinEntries_.begin()) { - ser << ' ' << e.op_ << ' '; + ser << ' ' << e.Operation() << ' '; } - if (e.reverseNamespacesOrder) { - ser << jq._namespace << '.' << e.joinIndex_ << ' ' << InvertJoinCondition(e.condition_) << ' ' << query_._namespace << '.' - << e.index_; + if (e.ReverseNamespacesOrder()) { + ser << jq.NsName() << '.' << e.RightFieldName() << ' ' << InvertJoinCondition(e.Condition()) << ' ' << query_.NsName() << '.' + << e.LeftFieldName(); } else { - ser << query_._namespace << '.' << e.index_ << ' ' << e.condition_ << ' ' << jq._namespace << '.' << e.joinIndex_; + ser << query_.NsName() << '.' << e.LeftFieldName() << ' ' << e.Condition() << ' ' << jq.NsName() << '.' << e.RightFieldName(); } } if (jq.joinEntries_.size() != 1) ser << ')'; @@ -137,7 +134,7 @@ void SQLEncoder::dumpEqualPositions(WrSerializer &ser, const EqualPositions_t &e } WrSerializer &SQLEncoder::GetSQL(WrSerializer &ser, bool stripArgs) const { - switch (query_.type_) { + switch (realQueryType_) { case QuerySelect: { ser << "SELECT "; bool needComma = false; @@ -171,10 +168,10 @@ WrSerializer &SQLEncoder::GetSQL(WrSerializer &ser, bool stripArgs) const { distinctIndex = query_.aggregations_[0].Fields()[0]; } if (query_.selectFilter_.empty()) { - if (query_.count != 0 || query_.calcTotal == ModeNoTotal) { + if (query_.Limit() != 0 || query_.CalcTotal() == ModeNoTotal) { if (needComma) ser << ", "; ser << '*'; - if (query_.calcTotal != ModeNoTotal) { + if (query_.CalcTotal() != ModeNoTotal) { needComma = true; } } @@ -190,19 +187,19 @@ WrSerializer &SQLEncoder::GetSQL(WrSerializer &ser, bool stripArgs) const { } } } - if (query_.calcTotal != ModeNoTotal) { + if (query_.CalcTotal() != ModeNoTotal) { if (needComma) ser << ", "; - if (query_.calcTotal == ModeAccurateTotal) ser << "COUNT(*)"; - if (query_.calcTotal == ModeCachedTotal) ser << "COUNT_CACHED(*)"; + if (query_.CalcTotal() == ModeAccurateTotal) ser << "COUNT(*)"; + if (query_.CalcTotal() == ModeCachedTotal) ser << "COUNT_CACHED(*)"; } - ser << " FROM " << query_._namespace; + ser << " FROM " << query_.NsName(); } break; case QueryDelete: - ser << "DELETE FROM " << query_._namespace; + ser << "DELETE FROM " << query_.NsName(); break; case QueryUpdate: { if (query_.UpdateFields().empty()) break; - ser << "UPDATE " << query_._namespace; + ser << "UPDATE " << query_.NsName(); FieldModifyMode mode = query_.UpdateFields().front().Mode(); bool isUpdate = (mode == FieldModeSet || mode == FieldModeSetJson); if (isUpdate) { @@ -219,11 +216,19 @@ WrSerializer &SQLEncoder::GetSQL(WrSerializer &ser, bool stripArgs) const { if (isArray) ser << '['; for (const Variant &v : field.Values()) { if (&v != &*field.Values().begin()) ser << ','; - if (v.Type().Is() && !field.IsExpression() && (mode != FieldModeSetJson)) { - stringToSql(v.As(), ser); - } else { - ser << v.As(); - } + v.Type().EvaluateOneOf(overloaded{ + [&](KeyValueType::String) { + if (!field.IsExpression() && mode != FieldModeSetJson) { + stringToSql(v.As(), ser); + } else { + ser << v.As(); + } + }, + [&](KeyValueType::Uuid) { ser << '\'' << v.As() << '\''; }, + [&](OneOf) { + ser << v.As(); + }}); } if (isArray) ser << "]"; } @@ -231,7 +236,7 @@ WrSerializer 
&SQLEncoder::GetSQL(WrSerializer &ser, bool stripArgs) const { break; } case QueryTruncate: - ser << "TRUNCATE " << query_._namespace; + ser << "TRUNCATE " << query_.NsName(); break; default: throw Error(errParams, "Not implemented"); @@ -242,8 +247,8 @@ WrSerializer &SQLEncoder::GetSQL(WrSerializer &ser, bool stripArgs) const { dumpMerged(ser, stripArgs); dumpOrderBy(ser, stripArgs); - if (query_.start != QueryEntry::kDefaultOffset && !stripArgs) ser << " OFFSET " << query_.start; - if (query_.count != QueryEntry::kDefaultLimit && !stripArgs) ser << " LIMIT " << query_.count; + if (query_.HasOffset() && !stripArgs) ser << " OFFSET " << query_.Offset(); + if (query_.HasLimit() && !stripArgs) ser << " LIMIT " << query_.Limit(); return ser; } @@ -274,41 +279,43 @@ void SQLEncoder::dumpWhereEntries(QueryEntries::const_iterator from, QueryEntrie if (encodedEntries) { ser << opNames[op] << ' '; } - if (entry.condition == CondDWithin) { + if (entry.Condition() == CondDWithin) { ser << "ST_DWithin("; - indexToSql(entry.index, ser); + indexToSql(entry.FieldName(), ser); if (stripArgs) { ser << ", ?, ?)"; } else { - assertrx(entry.values.size() == 2); + assertrx(entry.Values().size() == 2); Point point; double distance; - if (entry.values[0].Type().Is()) { - point = static_cast(entry.values[0]); - distance = entry.values[1].As(); + if (entry.Values()[0].Type().Is()) { + point = static_cast(entry.Values()[0]); + distance = entry.Values()[1].As(); } else { - point = static_cast(entry.values[1]); - distance = entry.values[0].As(); + point = static_cast(entry.Values()[1]); + distance = entry.Values()[0].As(); } ser << ", ST_GeomFromText('POINT(" << point.X() << ' ' << point.Y() << ")'), " << distance << ')'; } } else { - indexToSql(entry.index, ser); - ser << ' ' << entry.condition << ' '; - if (entry.condition == CondEmpty || entry.condition == CondAny) { + indexToSql(entry.FieldName(), ser); + ser << ' ' << entry.Condition() << ' '; + if (entry.Condition() == CondEmpty || entry.Condition() == CondAny) { } else if (stripArgs) { ser << '?'; } else { - if (entry.values.size() != 1) ser << '('; - for (auto &v : entry.values) { - if (&v != &entry.values[0]) ser << ','; - if (v.Type().Is()) { - stringToSql(v.As(), ser); - } else { - ser << v.As(); - } + if (entry.Values().size() != 1) ser << '('; + for (auto &v : entry.Values()) { + if (&v != &entry.Values()[0]) ser << ','; + v.Type().EvaluateOneOf(overloaded{ + [&](KeyValueType::String) { stringToSql(v.As(), ser); }, + [&](KeyValueType::Uuid) { ser << '\'' << v.As() << '\''; }, + [&](OneOf) { + ser << v.As(); + }}); } - if (entry.values.size() != 1) ser << ")"; + if (entry.Values().size() != 1) ser << ")"; } } }, @@ -319,9 +326,9 @@ void SQLEncoder::dumpWhereEntries(QueryEntries::const_iterator from, QueryEntrie SQLEncoder(query_).DumpSingleJoinQuery(jqe.joinIndex, ser, stripArgs); }, [&ser](const BetweenFieldsQueryEntry &entry) { - indexToSql(entry.firstIndex, ser); + indexToSql(entry.LeftFieldName(), ser); ser << ' ' << entry.Condition() << ' '; - indexToSql(entry.secondIndex, ser); + indexToSql(entry.RightFieldName(), ser); }); ++encodedEntries; } diff --git a/cpp_src/core/query/sql/sqlencoder.h b/cpp_src/core/query/sql/sqlencoder.h index c1b217609..968f387df 100644 --- a/cpp_src/core/query/sql/sqlencoder.h +++ b/cpp_src/core/query/sql/sqlencoder.h @@ -2,7 +2,6 @@ #include #include "core/query/query.h" -#include "core/type_consts.h" /// @namespace reindexer /// The base namespace @@ -12,7 +11,8 @@ class WrSerializer; class SQLEncoder { public: - 
SQLEncoder(const Query &q); + SQLEncoder(const Query &q) noexcept : SQLEncoder(q, q.Type()) {} + SQLEncoder(const Query &q, QueryType queryType) noexcept : query_(q), realQueryType_(queryType) {} WrSerializer &GetSQL(WrSerializer &ser, bool stripArgs = false) const; @@ -40,8 +40,8 @@ class SQLEncoder { /// Builds a print version of all equal_position() functions in query. /// @param ser - serializer to store SQL string - /// @param parenthesisIndex - index of current parenthesis - void dumpEqualPositions(WrSerializer &ser, const EqualPositions_t &) const; + /// @param equalPositions - equal positions array + void dumpEqualPositions(WrSerializer &ser, const EqualPositions_t &equalPositions) const; /// Builds a print version of all where condition entries. /// @param from - iterator to first entry @@ -52,6 +52,7 @@ class SQLEncoder { void dumpSQLWhere(WrSerializer &ser, bool stripArgs) const; const Query &query_; + const QueryType realQueryType_; }; } // namespace reindexer diff --git a/cpp_src/core/query/sql/sqlparser.cc b/cpp_src/core/query/sql/sqlparser.cc index 8549d03ae..1cb96cfd1 100644 --- a/cpp_src/core/query/sql/sqlparser.cc +++ b/cpp_src/core/query/sql/sqlparser.cc @@ -6,6 +6,7 @@ #include "core/queryresults/aggregationresult.h" #include "core/type_consts_helpers.h" #include "sqltokentype.h" +#include "tools/stringstools.h" #include "vendor/double-conversion/double-conversion.h" #include "vendor/gason/gason.h" @@ -91,16 +92,16 @@ int SQLParser::selectParse(tokenizer &parser) { parser.next_token(); tok = peekSqlToken(parser, SingleSelectFieldSqlToken); if (name.text() == "count"sv) { - query_.calcTotal = ModeAccurateTotal; + query_.CalcTotal(ModeAccurateTotal); if (!wasSelectFilter) { - query_.count = 0; + query_.Limit(0); } tok = parser.next_token(); if (tok.text() != "*") throw Error(errParseSQL, "Expected '*', but found '%s' in query, %s", tok.text(), parser.where()); } else if (name.text() == "count_cached"sv) { - query_.calcTotal = ModeCachedTotal; + query_.CalcTotal(ModeCachedTotal); if (!wasSelectFilter) { - query_.count = 0; + query_.Limit(0); } tok = parser.next_token(); if (tok.text() != "*"sv) throw Error(errParseSQL, "Expected '*', but found '%s' in query, %s", tok.text(), parser.where()); @@ -168,13 +169,13 @@ int SQLParser::selectParse(tokenizer &parser) { throw Error(errConflict, kAggregationWithSelectFieldsMsgError); } query_.selectFilter_.emplace_back(nameWithCase.text()); - query_.count = QueryEntry::kDefaultLimit; + query_.Limit(QueryEntry::kDefaultLimit); wasSelectFilter = true; } else if (name.text() == "*"sv) { if (!query_.CanAddSelectFilter()) { throw Error(errConflict, kAggregationWithSelectFieldsMsgError); } - query_.count = QueryEntry::kDefaultLimit; + query_.Limit(QueryEntry::kDefaultLimit); wasSelectFilter = true; query_.selectFilter_.clear(); } @@ -187,8 +188,8 @@ int SQLParser::selectParse(tokenizer &parser) { throw Error(errParams, "Expected 'FROM', but found '%s' in query, %s", tok.text(), parser.where()); peekSqlToken(parser, NamespaceSqlToken); - query_._namespace = std::string(parser.next_token().text()); - ctx_.updateLinkedNs(query_._namespace); + query_.SetNsName(parser.next_token().text()); + ctx_.updateLinkedNs(query_.NsName()); while (!parser.end()) { tok = peekSqlToken(parser, SelectConditionsStart); @@ -200,17 +201,17 @@ int SQLParser::selectParse(tokenizer &parser) { tok = parser.next_token(); if (tok.type != TokenNumber) throw Error(errParseSQL, "Expected number, but found '%s' in query, %s", tok.text(), parser.where()); - query_.count = 
stoi(tok.text()); + query_.Limit(stoi(tok.text())); } else if (tok.text() == "offset"sv) { parser.next_token(); tok = parser.next_token(); if (tok.type != TokenNumber) throw Error(errParseSQL, "Expected number, but found '%s' in query, %s", tok.text(), parser.where()); - query_.start = stoi(tok.text()); + query_.Offset(stoi(tok.text())); } else if (tok.text() == "order"sv) { parser.next_token(); parseOrderBy(parser, query_.sortingEntries_, query_.forcedSortOrder_); - ctx_.updateLinkedNs(query_._namespace); + ctx_.updateLinkedNs(query_.NsName()); } else if (tok.text() == "join"sv) { parser.next_token(); parseJoin(JoinType::LeftJoin, parser); @@ -402,8 +403,8 @@ int SQLParser::deleteParse(tokenizer &parser) { throw Error(errParams, "Expected 'FROM', but found '%s' in query, %s", tok.text(), parser.where()); peekSqlToken(parser, NamespaceSqlToken); - query_._namespace = std::string(parser.next_token().text()); - ctx_.updateLinkedNs(query_._namespace); + query_.SetNsName(parser.next_token().text()); + ctx_.updateLinkedNs(query_.NsName()); while (!parser.end()) { tok = peekSqlToken(parser, DeleteConditionsStart); @@ -415,17 +416,17 @@ int SQLParser::deleteParse(tokenizer &parser) { tok = parser.next_token(); if (tok.type != TokenNumber) throw Error(errParseSQL, "Expected number, but found '%s' in query, %s", tok.text(), parser.where()); - query_.count = stoi(tok.text()); + query_.Limit(stoi(tok.text())); } else if (tok.text() == "offset"sv) { parser.next_token(); tok = parser.next_token(); if (tok.type != TokenNumber) throw Error(errParseSQL, "Expected number, but found '%s' in query, %s", tok.text(), parser.where()); - query_.start = stoi(tok.text()); + query_.Offset(stoi(tok.text())); } else if (tok.text() == "order"sv) { parser.next_token(); parseOrderBy(parser, query_.sortingEntries_, query_.forcedSortOrder_); - ctx_.updateLinkedNs(query_._namespace); + ctx_.updateLinkedNs(query_.NsName()); } else break; } @@ -491,7 +492,7 @@ UpdateEntry SQLParser::parseUpdateField(tokenizer &parser) { if (tok.type != TokenName) { throw Error(errParseSQL, "Expected field name but found '%s' in query %s", tok.text(), parser.where()); } - UpdateEntry updateField{{tok.text().data(), tok.text().length()}, {}}; + UpdateEntry updateField{tok.text(), {}}; parser.next_token(); tok = parser.next_token(); @@ -545,8 +546,8 @@ int SQLParser::updateParse(tokenizer &parser) { parser.next_token(); token tok = peekSqlToken(parser, NamespaceSqlToken); - query_._namespace = std::string(tok.text()); - ctx_.updateLinkedNs(query_._namespace); + query_.SetNsName(tok.text()); + ctx_.updateLinkedNs(query_.NsName()); parser.next_token(); tok = peekSqlToken(parser, UpdateOptionsSqlToken); @@ -586,8 +587,8 @@ int SQLParser::updateParse(tokenizer &parser) { int SQLParser::truncateParse(tokenizer &parser) { parser.next_token(); token tok = peekSqlToken(parser, NamespaceSqlToken); - query_._namespace = std::string(tok.text()); - ctx_.updateLinkedNs(query_._namespace); + query_.SetNsName(tok.text()); + ctx_.updateLinkedNs(query_.NsName()); parser.next_token(); return 0; } @@ -650,7 +651,7 @@ int SQLParser::parseWhere(tokenizer &parser) { nextOp = OpAnd; } else { // Index name - const std::string index{tok.text()}; + std::string index{tok.text()}; // Operator CondType condition; @@ -673,13 +674,13 @@ int SQLParser::parseWhere(tokenizer &parser) { if (ctx_.autocompleteMode) peekSqlToken(parser, WhereFieldValueSqlToken, false); tok = parser.next_token(); if (iequals(tok.text(), "null"sv) || iequals(tok.text(), "empty"sv)) { - 
query_.entries.Append(nextOp, QueryEntry{index, CondEmpty, {}}); + query_.entries.Append(nextOp, QueryEntry{std::move(index), CondEmpty, {}}); } else if (iequals(tok.text(), "not"sv)) { tok = peekSqlToken(parser, WhereFieldNegateValueSqlToken, false); if (!iequals(tok.text(), "null"sv) && !iequals(tok.text(), "empty"sv)) { throw Error(errParseSQL, "Expected NULL, but found '%s' in query, %s", tok.text(), parser.where()); } - query_.entries.Append(nextOp, QueryEntry{index, CondAny, {}}); + query_.entries.Append(nextOp, QueryEntry{std::move(index), CondAny, {}}); tok = parser.next_token(false); } else if (tok.text() == "("sv) { VariantArray values; @@ -692,12 +693,12 @@ int SQLParser::parseWhere(tokenizer &parser) { if (tok.text() != ","sv) throw Error(errParseSQL, "Expected ')' or ',', but found '%s' in query, %s", tok.text(), parser.where()); } - query_.entries.Append(nextOp, QueryEntry{index, condition, std::move(values)}); + query_.entries.Append(nextOp, QueryEntry{std::move(index), condition, std::move(values)}); } else if (tok.type != TokenName || toLower(tok.text()) == "true" || toLower(tok.text()) == "false") { - query_.entries.Append(nextOp, QueryEntry{index, condition, {token2kv(tok, parser, true)}}); + query_.entries.Append(nextOp, QueryEntry{std::move(index), condition, {token2kv(tok, parser, true)}}); // Second field } else { - query_.entries.Append(nextOp, BetweenFieldsQueryEntry{index, condition, std::string{tok.text()}}); + query_.entries.Append(nextOp, BetweenFieldsQueryEntry{std::move(index), condition, std::string{tok.text()}}); } nextOp = OpAnd; } @@ -766,7 +767,7 @@ void SQLParser::parseEqualPositions(tokenizer &parser, std::vectorHoldsOrReferTo() && nameWithCase.text() == it->Value().index) { + if (it->HoldsOrReferTo() && nameWithCase.text() == it->Value().FieldName()) { validField = true; break; } @@ -933,11 +934,11 @@ void SQLParser::parseJoin(JoinType type, tokenizer &parser) { throw Error(errParseSQL, "Expected ')', but found %s, %s", tok.text(), parser.where()); } } else { - jquery._namespace = std::string(tok.text()); - ctx_.updateLinkedNs(jquery._namespace); + jquery.SetNsName(tok.text()); + ctx_.updateLinkedNs(jquery.NsName()); } jquery.joinType = type; - jparser.parseJoinEntries(parser, query_._namespace, jquery); + jparser.parseJoinEntries(parser, query_.NsName(), jquery); if (type != JoinType::LeftJoin) { query_.entries.Append((type == JoinType::InnerJoin) ? 
OpAnd : OpOr, JoinQueryEntry(query_.joinQueries_.size())); @@ -1023,28 +1024,25 @@ void SQLParser::parseJoinEntries(tokenizer &parser, const std::string &mainNs, J return; } - QueryJoinEntry je; - std::string ns1 = mainNs, ns2 = jquery._namespace; - std::string idx1 = parseJoinedFieldName(parser, ns1); - je.condition_ = getCondType(parser.next_token().text()); - std::string idx2 = parseJoinedFieldName(parser, ns2); - - if (ns1 == mainNs && ns2 == jquery._namespace) { - je.index_ = std::move(idx1); - je.joinIndex_ = std::move(idx2); - } else if (ns2 == mainNs && ns1 == jquery._namespace) { - je.index_ = std::move(idx2); - je.joinIndex_ = std::move(idx1); - je.condition_ = InvertJoinCondition(je.condition_); - je.reverseNamespacesOrder = true; - } else { - throw Error(errParseSQL, "Unexpected tables with ON statement: ('%s' and '%s') but expected ('%s' and '%s'), %s", ns1, ns2, - mainNs, jquery._namespace, parser.where()); + std::string ns1 = mainNs, ns2 = jquery.NsName(); + std::string fld1 = parseJoinedFieldName(parser, ns1); + CondType condition = getCondType(parser.next_token().text()); + std::string fld2 = parseJoinedFieldName(parser, ns2); + bool reverseNamespacesOrder{false}; + + if (ns1 != mainNs || ns2 != jquery.NsName()) { + if (ns2 == mainNs && ns1 == jquery.NsName()) { + std::swap(fld1, fld2); + condition = InvertJoinCondition(condition); + reverseNamespacesOrder = true; + } else { + throw Error(errParseSQL, "Unexpected tables with ON statement: ('%s' and '%s') but expected ('%s' and '%s'), %s", ns1, ns2, + mainNs, jquery.NsName(), parser.where()); + } } - je.op_ = jquery.nextOp_; + jquery.joinEntries_.emplace_back(jquery.nextOp_, condition, std::move(fld1), std::move(fld2), reverseNamespacesOrder); jquery.nextOp_ = OpAnd; - jquery.joinEntries_.emplace_back(std::move(je)); if (!braces) { return; } diff --git a/cpp_src/core/query/sql/sqlsuggester.h b/cpp_src/core/query/sql/sqlsuggester.h index f0e0feebf..e0d55ec29 100644 --- a/cpp_src/core/query/sql/sqlsuggester.h +++ b/cpp_src/core/query/sql/sqlsuggester.h @@ -2,9 +2,7 @@ #include #include "core/schema.h" -#include "estl/fast_hash_map.h" #include "sqlparser.h" -#include "tools/stringstools.h" namespace reindexer { diff --git a/cpp_src/core/querycache.h b/cpp_src/core/querycache.h index 4d98bd1d2..a9c0c4c14 100644 --- a/cpp_src/core/querycache.h +++ b/cpp_src/core/querycache.h @@ -8,9 +8,9 @@ namespace reindexer { -struct QueryTotalCountCacheVal { - QueryTotalCountCacheVal() = default; - QueryTotalCountCacheVal(size_t total) noexcept : total_count(total) {} +struct QueryCountCacheVal { + QueryCountCacheVal() = default; + QueryCountCacheVal(size_t total) noexcept : total_count(total) {} size_t Size() const noexcept { return 0; } @@ -49,9 +49,8 @@ struct HashQueryCacheKey { } }; -struct QueryTotalCountCache : LRUCache { - QueryTotalCountCache(size_t sizeLimit = kDefaultCacheSizeLimit, int hitCount = kDefaultHitCountToCache) - : LRUCache(sizeLimit, hitCount) {} -}; +using QueryCountCache = LRUCache; + +; } // namespace reindexer diff --git a/cpp_src/core/queryresults/queryresults.cc b/cpp_src/core/queryresults/queryresults.cc index 1ba73eaec..a17df13d5 100644 --- a/cpp_src/core/queryresults/queryresults.cc +++ b/cpp_src/core/queryresults/queryresults.cc @@ -427,18 +427,6 @@ Item QueryResults::Iterator::GetItem(bool enableHold) { return item; } -QueryResults::Iterator &QueryResults::Iterator::operator++() { - idx_++; - return *this; -} -QueryResults::Iterator &QueryResults::Iterator::operator+(int val) { - idx_ += val; - return *this; 
-} - -bool QueryResults::Iterator::operator!=(const Iterator &other) const { return idx_ != other.idx_; } -bool QueryResults::Iterator::operator==(const Iterator &other) const { return idx_ == other.idx_; } - void QueryResults::AddItem(Item &item, bool withData, bool enableHold) { auto ritem = item.impl_; if (item.GetID() != -1) { diff --git a/cpp_src/core/queryresults/queryresults.h b/cpp_src/core/queryresults/queryresults.h index e3e4d11f1..4c06d9d5f 100644 --- a/cpp_src/core/queryresults/queryresults.h +++ b/cpp_src/core/queryresults/queryresults.h @@ -52,10 +52,12 @@ class QueryResults { void AddItem(Item &item, bool withData = false, bool enableHold = true); std::string Dump() const; void Erase(ItemRefVector::iterator begin, ItemRefVector::iterator end); - size_t Count() const { return items_.size(); } - size_t TotalCount() const { return totalCount; } - const std::string &GetExplainResults() const { return explainResults; } - const std::vector &GetAggregationResults() const { return aggregationResults; } + size_t Count() const noexcept { return items_.size(); } + size_t TotalCount() const noexcept { return totalCount; } + const std::string &GetExplainResults() const &noexcept { return explainResults; } + const std::string &GetExplainResults() const && = delete; + const std::vector &GetAggregationResults() const &noexcept { return aggregationResults; } + const std::vector &GetAggregationResults() const && = delete; void Clear(); h_vector GetNamespaces() const; bool IsCacheEnabled() const { return !nonCacheableData; } @@ -77,12 +79,19 @@ class QueryResults { int64_t GetLSN() const { return qr_->items_[idx_].Value().GetLSN(); } bool IsRaw() const; std::string_view GetRaw() const; - Iterator &operator++(); - Iterator &operator+(int delta); - const Error &Status() const noexcept { return err_; } - bool operator!=(const Iterator &) const; - bool operator==(const Iterator &) const; - Iterator &operator*() { return *this; } + Iterator &operator++() noexcept { + idx_++; + return *this; + } + Iterator &operator+(int delta) noexcept { + idx_ += delta; + return *this; + } + + Error Status() const noexcept { return err_; } + bool operator==(const Iterator &other) const noexcept { return idx_ == other.idx_; } + bool operator!=(const Iterator &other) const noexcept { return !operator==(other); } + Iterator &operator*() noexcept { return *this; } const QueryResults *qr_; int idx_; diff --git a/cpp_src/core/queryresults/tableviewbuilder.h b/cpp_src/core/queryresults/tableviewbuilder.h index d9d092127..52fa3247a 100644 --- a/cpp_src/core/queryresults/tableviewbuilder.h +++ b/cpp_src/core/queryresults/tableviewbuilder.h @@ -2,15 +2,13 @@ #include #include +#include #include #include #include #include -#include "tools/errors.h" #include "tools/terminalutils.h" -#include - namespace reindexer { struct ColumnData { diff --git a/cpp_src/core/rdxcontext.h b/cpp_src/core/rdxcontext.h index c79b1eaed..ac7cfce47 100644 --- a/cpp_src/core/rdxcontext.h +++ b/cpp_src/core/rdxcontext.h @@ -18,22 +18,24 @@ struct IRdxCancelContext { virtual ~IRdxCancelContext() = default; }; +constexpr std::string_view kDefaultTimeoutError = "Context timeout"; +constexpr std::string_view kDefaultCancelError = "Context was canceled"; + template -void ThrowOnCancel(const Context& ctx, std::string_view errMsg = std::string_view()) { // TODO may be ""sv +void ThrowOnCancel(const Context& ctx, std::string_view errMsg = std::string_view()) { if (!ctx.isCancelable()) return; - auto cancel = ctx.checkCancel(); + const auto cancel = 
ctx.checkCancel(); switch (cancel) { case CancelType::Explicit: - throw Error(errCanceled, errMsg); + throw Error(errCanceled, errMsg.empty() ? kDefaultCancelError : errMsg); case CancelType::Timeout: - throw Error(errTimeout, errMsg); + throw Error(errTimeout, errMsg.empty() ? kDefaultTimeoutError : errMsg); case CancelType::None: return; - default: - assertrx(false); - throw Error(errCanceled, errMsg); } + assertrx(false); + throw Error(errCanceled, errMsg.empty() ? kDefaultCancelError : errMsg); } class RdxDeadlineContext : public IRdxCancelContext { @@ -156,7 +158,7 @@ class InternalRdxContext { std::move(user), connectionId); } InternalRdxContext WithContextParams(milliseconds timeout, std::string_view activityTracer, std::string&& user, - int connectionId) const { + int connectionId) const { return activityTracer.empty() ? InternalRdxContext(cmpl_, RdxDeadlineContext(timeout, deadlineCtx_.parent()), activityTracer_, user_, connectionId_) : InternalRdxContext(cmpl_, RdxDeadlineContext(timeout, deadlineCtx_.parent()), diff --git a/cpp_src/core/reindexerimpl.cc b/cpp_src/core/reindexerimpl.cc index b34c8778b..65ec563e5 100644 --- a/cpp_src/core/reindexerimpl.cc +++ b/cpp_src/core/reindexerimpl.cc @@ -4,17 +4,16 @@ #include #include #include "cjson/jsonbuilder.h" -#include "core/cjson/jsondecoder.h" #include "core/cjson/protobufschemabuilder.h" #include "core/iclientsstats.h" #include "core/index/index.h" #include "core/itemimpl.h" #include "core/nsselecter/crashqueryreporter.h" +#include "core/nsselecter/querypreprocessor.h" #include "core/query/sql/sqlsuggester.h" #include "core/selectfunc/selectfunc.h" #include "core/type_consts_helpers.h" #include "defnsconfigs.h" -#include "estl/contexted_locks.h" #include "estl/defines.h" #include "queryresults/joinresults.h" #include "replicator/replicator.h" @@ -244,7 +243,7 @@ Error ReindexerImpl::Connect(const std::string& dsn, ConnectOpts opts) { idx = nsIdx.fetch_add(1, std::memory_order_relaxed)) { auto& de = foundNs[idx]; if (de.isDir && validateObjectName(de.name, true)) { - if (de.name[0] == '@') { + if (de.name[0] == kTmpNsPrefix) { const std::string tmpPath = fs::JoinPath(storagePath_, de.name); logPrintf(LogWarning, "Dropping tmp namespace '%s'", de.name); if (fs::RmDirAll(tmpPath) < 0) { @@ -336,7 +335,7 @@ Error ReindexerImpl::addNamespace(const NamespaceDef& nsDef, const RdxContext& r Namespace::Ptr ns; try { { - ULock lock(mtx_, &rdxCtx); + SLock lock(mtx_, &rdxCtx); if (namespaces_.find(nsDef.name) != namespaces_.end()) { return Error(errParams, "Namespace '%s' already exists", nsDef.name); } @@ -361,7 +360,9 @@ Error ReindexerImpl::addNamespace(const NamespaceDef& nsDef, const RdxContext& r namespaces_.insert({nsDef.name, ns}); } if (!nsDef.isTemporary) observers_.OnWALUpdate(LSNPair(), nsDef.name, WALRecord(WalNamespaceAdd)); - for (auto& indexDef : nsDef.indexes) ns->AddIndex(indexDef, rdxCtx); + for (auto& indexDef : nsDef.indexes) { + ns->AddIndex(indexDef, rdxCtx); + } ns->SetSchema(nsDef.schemaJson, rdxCtx); if (nsDef.storage.IsSlaveMode()) ns->setSlaveMode(rdxCtx); @@ -427,7 +428,6 @@ Error ReindexerImpl::closeNamespace(std::string_view nsName, const RdxContext& c try { ULock lock(mtx_, &ctx); auto nsIt = namespaces_.find(nsName); - if (nsIt == namespaces_.end()) { return Error(errNotFound, "Namespace '%s' does not exist", nsName); } @@ -521,7 +521,9 @@ Error ReindexerImpl::renameNamespace(std::string_view srcNsName, const std::stri const InternalRdxContext& ctx) { Namespace::Ptr dstNs, srcNs; try { - if (dstNsName == 
srcNsName.data()) return errOK; + if (dstNsName == srcNsName.data()) { + return {}; + } if (isSystemNamespaceNameStrict(srcNsName)) { return Error(errParams, "Can't rename system namespace (%s)", srcNsName); } @@ -536,6 +538,22 @@ Error ReindexerImpl::renameNamespace(std::string_view srcNsName, const std::stri const auto rdxCtx = ctx.CreateRdxContext( ctx.NeedTraceActivity() ? (ser << "RENAME " << srcNsName << " to " << dstNsName).Slice() : ""sv, activities_); + { + // Perform namespace flushes to minimize chances of the flush under lock + SLock lock(mtx_, &rdxCtx); + auto srcIt = namespaces_.find(srcNsName); + srcNs = (srcIt != namespaces_.end()) ? srcIt->second : Namespace::Ptr(); + lock.unlock(); + + if (srcNs) { + auto err = srcNs->awaitMainNs(rdxCtx)->FlushStorage(rdxCtx); + if (!err.ok()) { + return Error(err.code(), "Unable to flush storage before rename: %s", err.what()); + } + srcNs.reset(); + } + } + ULock lock(mtx_, &rdxCtx); auto srcIt = namespaces_.find(srcNsName); if (srcIt == namespaces_.end()) { @@ -546,8 +564,7 @@ Error ReindexerImpl::renameNamespace(std::string_view srcNsName, const std::stri auto replState = srcNs->GetReplState(rdxCtx); - if (fromReplication || !replState.slaveMode) // rename from replicator forced temporary ns - { + if (fromReplication || !replState.slaveMode) { // rename from replicator forced temporary ns auto dstIt = namespaces_.find(dstNsName); auto needWalUpdate = !srcNs->GetDefinition(rdxCtx).isTemporary; if (dstIt != namespaces_.end()) { @@ -557,18 +574,16 @@ Error ReindexerImpl::renameNamespace(std::string_view srcNsName, const std::stri } else { srcNs->Rename(dstNsName, storagePath_, rdxCtx); } - if (needWalUpdate) observers_.OnWALUpdate(LSNPair(), srcNsName, WALRecord(WalNamespaceRename, dstNsName)); - - auto srcNamespace = srcIt->second; namespaces_.erase(srcIt); - namespaces_[dstNsName] = std::move(srcNamespace); + namespaces_[dstNsName] = std::move(srcNs); + if (needWalUpdate) observers_.OnWALUpdate(LSNPair(), srcNsName, WALRecord(WalNamespaceRename, dstNsName)); } else { return Error(errLogic, "Can't rename namespace in slave mode '%s'", srcNsName); } } catch (const Error& err) { return err; } - return errOK; + return {}; } template @@ -678,7 +693,7 @@ Error ReindexerImpl::Update(const Query& q, QueryResults& result, const Internal try { WrSerializer ser; const auto rdxCtx = ctx.CreateRdxContext(ctx.NeedTraceActivity() ? 
q.GetSQL(ser).Slice() : ""sv, activities_, result); - auto ns = getNamespace(q._namespace, rdxCtx); + auto ns = getNamespace(q.NsName(), rdxCtx); ns->Update(q, result, rdxCtx); if (ns->IsSystem(rdxCtx)) { const std::string kNsName = ns->GetName(rdxCtx); @@ -788,7 +803,7 @@ Error ReindexerImpl::Delete(std::string_view nsName, Item& item, QueryResults& q Error ReindexerImpl::Delete(const Query& q, QueryResults& result, const InternalRdxContext& ctx) { const auto makeCtxStr = [&q](WrSerializer& ser) -> WrSerializer& { return q.GetSQL(ser); }; - const std::string_view nsName = q._namespace; + const std::string_view nsName = q.NsName(); APPLY_NS_FUNCTION2(false, Delete, q, result); } @@ -808,7 +823,7 @@ Error ReindexerImpl::Select(std::string_view query, QueryResults& result, const err = Update(q, result, ctx); break; case QueryTruncate: - err = TruncateNamespace(q._namespace, ctx); + err = TruncateNamespace(q.NsName(), ctx); break; default: err = Error(errParams, "Error unsupported query type %d", q.type_); @@ -840,7 +855,7 @@ Error ReindexerImpl::Select(const Query& q, QueryResults& result, const Internal const auto rdxCtx = ctx.CreateRdxContext(ctx.NeedTraceActivity() ? nonNormalizedSQL.Slice() : "", activities_, result); NsLocker locks(rdxCtx); - auto mainNsWrp = getNamespace(q._namespace, rdxCtx); + auto mainNsWrp = getNamespace(q.NsName(), rdxCtx); auto mainNs = q.IsWALQuery() ? mainNsWrp->awaitMainNs(rdxCtx) : mainNsWrp->getMainNs(); const auto queriesPerfStatsEnabled = configProvider_.QueriesPerfStatsEnabled(); @@ -862,7 +877,7 @@ Error ReindexerImpl::Select(const Query& q, QueryResults& result, const Internal } : std::function{}; - const bool isSystemNsRequest = isSystemNamespaceNameFast(q._namespace); + const bool isSystemNsRequest = isSystemNamespaceNameFast(q.NsName()); QueryStatCalculator statCalculator( std::move(hitter), std::chrono::microseconds(queriesThresholdUS), queriesPerfStatsEnabled || configProvider_.GetSelectLoggingParams().thresholdUs >= 0, @@ -871,13 +886,13 @@ Error ReindexerImpl::Select(const Query& q, QueryResults& result, const Internal StatsLocker::StatsLockT statsSelectLck; if (isSystemNsRequest) { - statsSelectLck = syncSystemNamespaces(q._namespace, detectFilterNsNames(q), rdxCtx); + statsSelectLck = syncSystemNamespaces(q.NsName(), detectFilterNsNames(q), rdxCtx); } // Lookup and lock namespaces_ mainNs->updateSelectTime(); locks.Add(mainNs); q.WalkNested(false, true, [this, &locks, &rdxCtx](const Query& q) { - auto nsWrp = getNamespace(q._namespace, rdxCtx); + auto nsWrp = getNamespace(q.NsName(), rdxCtx); auto ns = q.IsWALQuery() ? 
nsWrp->awaitMainNs(rdxCtx) : nsWrp->getMainNs(); ns->updateSelectTime(); locks.Add(ns); @@ -955,26 +970,26 @@ bool ReindexerImpl::isPreResultValuesModeOptimizationAvailable(const Query& jIte jItemQ.entries.ExecuteAppropriateForEach( Skip{}, [&jns, &result](const QueryEntry& qe) { - if (qe.idxNo >= 0) { - assertrx(jns->indexes_.size() > static_cast(qe.idxNo)); - const IndexType indexType = jns->indexes_[qe.idxNo]->Type(); + if (qe.IsFieldIndexed()) { + assertrx(jns->indexes_.size() > static_cast(qe.IndexNo())); + const IndexType indexType = jns->indexes_[qe.IndexNo()]->Type(); if (IsComposite(indexType) || IsFullText(indexType)) result = false; } }, [&jns, &result](const BetweenFieldsQueryEntry& qe) { - if (qe.firstIdxNo >= 0) { - assertrx(jns->indexes_.size() > static_cast(qe.firstIdxNo)); - const IndexType indexType = jns->indexes_[qe.firstIdxNo]->Type(); + if (qe.IsLeftFieldIndexed()) { + assertrx(jns->indexes_.size() > static_cast(qe.LeftIdxNo())); + const IndexType indexType = jns->indexes_[qe.LeftIdxNo()]->Type(); if (IsComposite(indexType) || IsFullText(indexType)) result = false; } - if (qe.secondIdxNo >= 0) { - assertrx(jns->indexes_.size() > static_cast(qe.secondIdxNo)); - if (IsComposite(jns->indexes_[qe.secondIdxNo]->Type())) result = false; + if (qe.IsRightFieldIndexed()) { + assertrx(jns->indexes_.size() > static_cast(qe.RightIdxNo())); + if (IsComposite(jns->indexes_[qe.RightIdxNo()]->Type())) result = false; } }); if (!result) return false; for (const auto& se : mainQ.sortingEntries_) { - if (byJoinedField(se.expression, jItemQ._namespace)) return false; // TODO maybe allow #1410 + if (byJoinedField(se.expression, jItemQ.NsName())) return false; // TODO maybe allow #1410 } return true; } @@ -984,14 +999,14 @@ JoinedSelectors ReindexerImpl::prepareJoinedSelectors(const Query& q, QueryResul std::vector& queryResultsContexts, const RdxContext& rdxCtx) { JoinedSelectors joinedSelectors; if (q.joinQueries_.empty()) return joinedSelectors; - auto ns = locks.Get(q._namespace); + auto ns = locks.Get(q.NsName()); assertrx(ns); // For each joined queries uint32_t joinedSelectorsCount = uint32_t(q.joinQueries_.size()); for (auto& jq : q.joinQueries_) { - if rx_unlikely (isSystemNamespaceNameFast(jq._namespace)) { - throw Error(errParams, "Queries to system namespaces ('%s') are not supported inside JOIN statement", jq._namespace); + if rx_unlikely (isSystemNamespaceNameFast(jq.NsName())) { + throw Error(errParams, "Queries to system namespaces ('%s') are not supported inside JOIN statement", jq.NsName()); } if (rx_unlikely(!jq.joinQueries_.empty())) { throw Error(errParams, "JOINs nested into the other JOINs are not supported"); @@ -1001,13 +1016,13 @@ JoinedSelectors ReindexerImpl::prepareJoinedSelectors(const Query& q, QueryResul } // Get common results from joined namespaces_ - auto jns = locks.Get(jq._namespace); + auto jns = locks.Get(jq.NsName()); assertrx(jns); // Do join for each item in main result - Query jItemQ(jq._namespace); + Query jItemQ(jq.NsName()); jItemQ.explain_ = q.explain_; - jItemQ.Debug(jq.debugLevel).Limit(jq.count); + jItemQ.Debug(jq.debugLevel).Limit(jq.Limit()); jItemQ.Strict(q.strictMode); for (size_t i = 0; i < jq.sortingEntries_.size(); ++i) { jItemQ.Sort(jq.sortingEntries_[i].expression, jq.sortingEntries_[i].desc); @@ -1017,18 +1032,13 @@ JoinedSelectors ReindexerImpl::prepareJoinedSelectors(const Query& q, QueryResul // Construct join conditions for (auto& je : jq.joinEntries_) { - int joinIdx = IndexValueType::NotSet; - if 
(!jns->getIndexByName(je.joinIndex_, joinIdx)) { - joinIdx = IndexValueType::SetByJsonPath; - } - QueryEntry qe(InvertJoinCondition(je.condition_), je.joinIndex_, joinIdx); - if (!ns->getIndexByName(je.index_, const_cast(je).idxNo)) { - const_cast(je).idxNo = IndexValueType::SetByJsonPath; - } - jItemQ.entries.Append(je.op_, std::move(qe)); + QueryPreprocessor::SetQueryField(const_cast(je).LeftFieldData(), *ns); + QueryPreprocessor::SetQueryField(const_cast(je).RightFieldData(), *jns); + jItemQ.entries.Append(je.Operation(), QueryField(je.RightFieldData()), InvertJoinCondition(je.Condition()), + QueryEntry::IgnoreEmptyValues{}); } - Query jjq(jq); + Query jjq(static_cast(jq)); JoinPreResult::Ptr preResult = std::make_shared(); uint32_t joinedFieldIdx = uint32_t(joinedSelectors.size()); JoinCacheRes joinRes; @@ -1058,24 +1068,6 @@ JoinedSelectors ReindexerImpl::prepareJoinedSelectors(const Query& q, QueryResul result.AddNamespace(jns, true); if (preResult->dataMode == JoinPreResult::ModeValues) { - jItemQ.entries.ExecuteAppropriateForEach( - Skip{}, - [&jns](QueryEntry& qe) { - if (qe.idxNo != IndexValueType::SetByJsonPath) { - assertrx(qe.idxNo >= 0 && static_cast(qe.idxNo) < jns->indexes_.size()); - if (jns->indexes_[qe.idxNo]->Opts().IsSparse()) qe.idxNo = IndexValueType::SetByJsonPath; - } - }, - [&jns](BetweenFieldsQueryEntry& qe) { - if (qe.firstIdxNo != IndexValueType::SetByJsonPath) { - assertrx(qe.firstIdxNo >= 0 && static_cast(qe.firstIdxNo) < jns->indexes_.size()); - if (jns->indexes_[qe.firstIdxNo]->Opts().IsSparse()) qe.firstIdxNo = IndexValueType::SetByJsonPath; - } - if (qe.secondIdxNo != IndexValueType::SetByJsonPath) { - assertrx(qe.secondIdxNo >= 0 && static_cast(qe.secondIdxNo) < jns->indexes_.size()); - if (jns->indexes_[qe.secondIdxNo]->Opts().IsSparse()) qe.secondIdxNo = IndexValueType::SetByJsonPath; - } - }); preResult->values.PreselectAllowed(static_cast(jns->Config().maxPreselectSize) >= preResult->values.size()); if (!preResult->values.Locked()) preResult->values.Lock(); // If not from cache locks.Delete(jns); @@ -1091,9 +1083,9 @@ JoinedSelectors ReindexerImpl::prepareJoinedSelectors(const Query& q, QueryResul template void ReindexerImpl::doSelect(const Query& q, QueryResults& result, NsLocker& locks, SelectFunctionsHolder& func, const RdxContext& ctx, QueryStatCalculator& queryStatCalculator) { - auto ns = locks.Get(q._namespace); + auto ns = locks.Get(q.NsName()); if rx_unlikely (!ns) { - throw Error(errParams, "Namespace '%s' does not exist", q._namespace); + throw Error(errParams, "Namespace '%s' does not exist", q.NsName()); } std::vector joinQueryResultsContexts; bool thereAreJoins = !q.joinQueries_.empty(); @@ -1173,13 +1165,13 @@ void ReindexerImpl::doSelect(const Query& q, QueryResults& result, NsLocker& throw Error(errParams, "Limit and offset are not supported for aggregations '%s'", AggTypeToStr(errType)); } for (auto& mq : q.mergeQueries_) { - if rx_unlikely (isSystemNamespaceNameFast(mq._namespace)) { - throw Error(errParams, "Queries to system namespaces ('%s') are not supported inside MERGE statement", mq._namespace); + if rx_unlikely (isSystemNamespaceNameFast(mq.NsName())) { + throw Error(errParams, "Queries to system namespaces ('%s') are not supported inside MERGE statement", mq.NsName()); } if rx_unlikely (!mq.sortingEntries_.empty()) { throw Error(errParams, "Sorting in inner merge query is not allowed"); } - if rx_unlikely (!mq.aggregations_.empty() || mq.calcTotal != ModeNoTotal) { + if rx_unlikely (!mq.aggregations_.empty() || 
mq.CalcTotal() != ModeNoTotal) { throw Error(errParams, "Aggregations in inner merge query is not allowed"); } if rx_unlikely (mq.HasLimit() || mq.HasOffset()) { @@ -1189,7 +1181,7 @@ void ReindexerImpl::doSelect(const Query& q, QueryResults& result, NsLocker& throw Error(errParams, "MERGEs nested into the MERGEs are not supported"); } - auto mns = locks.Get(mq._namespace); + auto mns = locks.Get(mq.NsName()); assertrx(mns); SelectCtx mctx(mq, &q); mctx.nsid = ++counter; @@ -1204,16 +1196,16 @@ void ReindexerImpl::doSelect(const Query& q, QueryResults& result, NsLocker& result.AddNamespace(mns, true); } ItemRefVector& itemRefVec = result.Items(); - if (static_cast(q.start) >= itemRefVec.size()) { + if (q.Offset() >= itemRefVec.size()) { result.Erase(itemRefVec.begin(), itemRefVec.end()); return; } std::sort(itemRefVec.begin(), itemRefVec.end(), ItemRefLess()); - if (q.start > QueryEntry::kDefaultOffset) { - result.Erase(itemRefVec.begin(), itemRefVec.begin() + q.start); + if (q.HasOffset()) { + result.Erase(itemRefVec.begin(), itemRefVec.begin() + q.Offset()); } - if (itemRefVec.size() > q.count) { - result.Erase(itemRefVec.begin() + q.count, itemRefVec.end()); + if (itemRefVec.size() > q.Limit()) { + result.Erase(itemRefVec.begin() + q.Limit(), itemRefVec.end()); } } // Adding context to QueryResults @@ -1234,7 +1226,6 @@ Error ReindexerImpl::Commit(std::string_view /*_namespace*/) { Namespace::Ptr ReindexerImpl::getNamespace(std::string_view nsName, const RdxContext& ctx) { SLock lock(mtx_, &ctx); auto nsIt = namespaces_.find(nsName); - if (nsIt == namespaces_.end()) { throw Error(errParams, "Namespace '%s' does not exist", nsName); } @@ -1645,7 +1636,7 @@ ReindexerImpl::FilterNsNamesT ReindexerImpl::detectFilterNsNames(const Query& q) } if (entries.HoldsOrReferTo(i)) { auto& qe = entries.Get(i); - if (qe.index == kNsNameField) { + if (qe.FieldName() == kNsNameField) { if (op == OpNot) { return std::nullopt; } @@ -1653,15 +1644,15 @@ ReindexerImpl::FilterNsNamesT ReindexerImpl::detectFilterNsNames(const Query& q) [i](const BracketRange& br) noexcept { return i >= br.begin && i < br.end; }) != notBrackets.end()) { return std::nullopt; } - if (qe.condition != CondSet && qe.condition != CondEq) { + if (qe.Condition() != CondSet && qe.Condition() != CondEq) { return std::nullopt; } if (res.has_value()) { return std::nullopt; } res.emplace(); - res->reserve(qe.values.size()); - for (auto& v : qe.values) { + res->reserve(qe.Values().size()); + for (auto& v : qe.Values()) { if (!v.Type().Is()) { return std::nullopt; } @@ -1670,7 +1661,7 @@ ReindexerImpl::FilterNsNamesT ReindexerImpl::detectFilterNsNames(const Query& q) } } else if (entries.HoldsOrReferTo(i)) { auto& qe = entries.Get(i); - if (qe.firstIndex == kNsNameField || qe.secondIndex == kNsNameField) { + if (qe.LeftFieldName() == kNsNameField || qe.RightFieldName() == kNsNameField) { return std::nullopt; } } else if (op == OpNot && entries.IsSubTree(i)) { diff --git a/cpp_src/core/schema.cc b/cpp_src/core/schema.cc index 9e9ed51c1..8a43101f8 100644 --- a/cpp_src/core/schema.cc +++ b/cpp_src/core/schema.cc @@ -260,19 +260,13 @@ Error Schema::FromJSON(std::string_view json) { gason::JsonParser parser; auto node = parser.Parse(json); parseJsonNode(node, path, true); - originalJson_.assign(json.data(), json.size()); protobufNsNumber_ = node["x-protobuf-ns-number"].As(-1); - if (protobufNsNumber_ == -1 && originalJson_ != "{}") { + if (protobufNsNumber_ == -1 && json != "{}") { protobufNsNumber_ = counter++; - - // TODO: fix it - auto pos = 
originalJson_.find_last_of('}'); - if (pos != std::string::npos) { - originalJson_ = originalJson_.erase(pos); - originalJson_ += ",\"x-protobuf-ns-number\":" + std::to_string(protobufNsNumber_) + "}"; - } + originalJson_ = AppendProtobufNumber(json, protobufNsNumber_); + } else { + originalJson_.assign(json); } - } catch (const gason::Exception& ex) { return Error(errParseJson, "Schema: %s\nJson: %s", ex.what(), originalJson_); } catch (const Error& err) { @@ -305,6 +299,18 @@ Error Schema::GetProtobufSchema(WrSerializer& schema) const { return protobufSchemaStatus_; } +std::string Schema::AppendProtobufNumber(std::string_view j, int protobufNsNumber) { + std::string json(j); + if (protobufNsNumber != -1 && j != "{}") { + auto pos = json.find_last_of('}'); + if (pos != std::string::npos) { + json.erase(pos); + json += ",\"x-protobuf-ns-number\":" + std::to_string(protobufNsNumber) + "}"; + } + } + return json; +} + void Schema::parseJsonNode(const gason::JsonNode& node, PrefixTree::PathT& splittedPath, bool isRequired) { bool isArray = false; diff --git a/cpp_src/core/schema.h b/cpp_src/core/schema.h index a28cc3264..aa147f1c7 100644 --- a/cpp_src/core/schema.h +++ b/cpp_src/core/schema.h @@ -133,10 +133,12 @@ class Schema { Error FromJSON(std::string_view json); void GetJSON(WrSerializer&) const; + std::string_view GetJSON() const noexcept { return originalJson_; } Error BuildProtobufSchema(TagsMatcher& tm, PayloadType& pt); Error GetProtobufSchema(WrSerializer& schema) const; int GetProtobufNsNumber() const { return protobufNsNumber_; } const PrefixTree::PrefixTreeNode* GetRoot() const { return &paths_.root_; } + static std::string AppendProtobufNumber(std::string_view j, int protobufNsNumber); std::vector MakeCsvTagOrdering(const TagsMatcher& tm) const; bool IsEmpty() const noexcept; diff --git a/cpp_src/core/storage/leveldblogger.cc b/cpp_src/core/storage/leveldblogger.cc new file mode 100644 index 000000000..eba6dc16c --- /dev/null +++ b/cpp_src/core/storage/leveldblogger.cc @@ -0,0 +1,22 @@ +#include "leveldblogger.h" + +#include +#include + +// Using separate cc-file to be able to compile it with different options. 
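The schema change above factors the JSON splice out into Schema::AppendProtobufNumber: drop the trailing '}' and append the x-protobuf-ns-number key before re-closing the object. A rough standalone illustration of that transformation (appendNsNumber is a local re-implementation written for this example, not the reindexer API):

#include <cassert>
#include <string>
#include <string_view>

// Same splice as Schema::AppendProtobufNumber, reduced to plain std::string:
// cut the final '}' and append the protobuf namespace number as the last key.
static std::string appendNsNumber(std::string_view j, int nsNumber) {
	std::string json(j);
	if (nsNumber != -1 && j != "{}") {
		const auto pos = json.find_last_of('}');
		if (pos != std::string::npos) {
			json.erase(pos);
			json += ",\"x-protobuf-ns-number\":" + std::to_string(nsNumber) + "}";
		}
	}
	return json;
}

int main() {
	assert(appendNsNumber(R"({"type":"object"})", 7) == R"({"type":"object","x-protobuf-ns-number":7})");
	assert(appendNsNumber("{}", 7) == "{}");  // an empty schema stays untouched
}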
+// Static LevelDB v1.23 is built with -fno-rtti by default and to inherit NoOpLogger from leveldb's logger, this file must be built with +// -fno-rtti to + +namespace reindexer { +namespace datastorage { + +class NoOpLogger : public leveldb::Logger { + void Logv(const char* /*format*/, va_list /*ap*/) override final {} +}; + +static NoOpLogger dummyLevelDBLogger; + +void SetDummyLogger(leveldb::Options& options) { options.info_log = &dummyLevelDBLogger; } + +} // namespace datastorage +} // namespace reindexer diff --git a/cpp_src/core/storage/leveldblogger.h b/cpp_src/core/storage/leveldblogger.h new file mode 100644 index 000000000..cd14a5242 --- /dev/null +++ b/cpp_src/core/storage/leveldblogger.h @@ -0,0 +1,13 @@ +#pragma once + +namespace leveldb { +class Options; +} + +namespace reindexer { +namespace datastorage { + +void SetDummyLogger(leveldb::Options& options); + +} +} // namespace reindexer diff --git a/cpp_src/core/storage/leveldbstorage.cc b/cpp_src/core/storage/leveldbstorage.cc index 00520a2fb..026a443d1 100644 --- a/cpp_src/core/storage/leveldbstorage.cc +++ b/cpp_src/core/storage/leveldbstorage.cc @@ -5,7 +5,9 @@ #include #include #include +#include "leveldblogger.h" #include "tools/assertrx.h" +#include "tools/fsops.h" namespace reindexer { namespace datastorage { @@ -125,6 +127,7 @@ Error LevelDbStorage::doOpen(const std::string& path, const StorageOpts& opts) { leveldb::Options options; options.create_if_missing = opts.IsCreateIfMissing(); options.max_open_files = 50; + SetDummyLogger(options); leveldb::DB* db; leveldb::Status status = leveldb::DB::Open(options, path, &db); @@ -144,7 +147,11 @@ void LevelDbStorage::doDestroy(const std::string& path) { db_.reset(); leveldb::Status status = leveldb::DestroyDB(path.c_str(), options); if (!status.ok()) { - printf("Cannot destroy DB: %s, %s\n", path.c_str(), status.ToString().c_str()); + fprintf(stderr, "Cannot destroy LevelDB's storage: %s, %s. Trying to remove files by the backup mechanism...\n", path.c_str(), + status.ToString().c_str()); + if (fs::RmDirAll(path) != 0) { + fprintf(stderr, "Unable to remove LevelDB's storage: %s, %s", path.c_str(), strerror(errno)); + } } } diff --git a/cpp_src/core/storage/leveldbstorage.h b/cpp_src/core/storage/leveldbstorage.h index 291396452..472c1d696 100644 --- a/cpp_src/core/storage/leveldbstorage.h +++ b/cpp_src/core/storage/leveldbstorage.h @@ -2,6 +2,7 @@ #ifdef REINDEX_WITH_LEVELDB +#include #include #include #include "basestorage.h" diff --git a/cpp_src/core/storage/rocksdbstorage.cc b/cpp_src/core/storage/rocksdbstorage.cc index 4d20d8ef7..9535275b9 100644 --- a/cpp_src/core/storage/rocksdbstorage.cc +++ b/cpp_src/core/storage/rocksdbstorage.cc @@ -6,6 +6,7 @@ #include #include #include +#include "tools/fsops.h" namespace reindexer { namespace datastorage { @@ -143,8 +144,10 @@ void RocksDbStorage::doDestroy(const std::string& path) { options.create_if_missing = true; db_.reset(); rocksdb::Status status = rocksdb::DestroyDB(path.c_str(), options); - if (!status.ok()) { - printf("Cannot destroy DB: %s, %s\n", path.c_str(), status.ToString().c_str()); + fprintf(stderr, "Cannot destroy RocksDB's storage: %s, %s. 
Trying to remove files by the backup mechanism...\n", path.c_str(), + status.ToString().c_str()); + if (fs::RmDirAll(path) != 0) { + fprintf(stderr, "Unable to remove RocksDB's storage: %s, %s", path.c_str(), strerror(errno)); } } diff --git a/cpp_src/core/transaction.cc b/cpp_src/core/transaction.cc index 6e54bc431..fc39bad7f 100644 --- a/cpp_src/core/transaction.cc +++ b/cpp_src/core/transaction.cc @@ -67,4 +67,8 @@ Transaction::time_point Transaction::GetStartTime() const { return impl_->startTime_; } +void Transaction::ValidatePK(const FieldsSet &pkFields) { + if (impl_) impl_->ValidatePK(pkFields); +} + } // namespace reindexer diff --git a/cpp_src/core/transaction.h b/cpp_src/core/transaction.h index bd68418b4..0ab5c8b43 100644 --- a/cpp_src/core/transaction.h +++ b/cpp_src/core/transaction.h @@ -42,6 +42,7 @@ class Transaction { const std::vector &GetSteps() const; bool IsTagsUpdated() const; time_point GetStartTime() const; + void ValidatePK(const FieldsSet &pkFields); protected: std::unique_ptr impl_; diff --git a/cpp_src/core/transactionimpl.cc b/cpp_src/core/transactionimpl.cc index e9f47393c..a7ccb4f16 100644 --- a/cpp_src/core/transactionimpl.cc +++ b/cpp_src/core/transactionimpl.cc @@ -21,15 +21,14 @@ Item TransactionImpl::GetItem(TransactionStep &&st) { return Item(new ItemImpl(payloadType_, tagsMatcher_, pkFields_, schema_, std::move(st.itemData_))); } -TransactionImpl::TransactionImpl(const std::string &nsName, const PayloadType &pt, const TagsMatcher &tm, const FieldsSet &pf, - std::shared_ptr schema) - : payloadType_(pt), - tagsMatcher_(tm), - pkFields_(pf), - schema_(std::move(schema)), - nsName_(nsName), - tagsUpdated_(false), - startTime_(std::chrono::high_resolution_clock::now()) {} +void TransactionImpl::ValidatePK(const FieldsSet &pkFields) { + std::lock_guard lck(mtx_); + if (hasDeleteItemSteps_ && rx_unlikely(pkFields != pkFields_)) { + throw Error( + errNotValid, + "Transaction has Delete-calls and it's PK metadata is outdated (probably PK has been change during the transaction creation)"); + } +} void TransactionImpl::UpdateTagsMatcherFromItem(ItemImpl *ritem) { if (ritem->Type().get() != payloadType_.get() || (ritem->tagsMatcher().isUpdated() && !tagsMatcher_.try_merge(ritem->tagsMatcher()))) { @@ -76,6 +75,7 @@ void TransactionImpl::Delete(Item &&item) { void TransactionImpl::Modify(Item &&item, ItemModifyMode mode) { std::unique_lock lock(mtx_); checkTagsMatcher(item); + hasDeleteItemSteps_ = hasDeleteItemSteps_ || (mode == ModeDelete); steps_.emplace_back(TransactionStep{std::move(item), mode}); } diff --git a/cpp_src/core/transactionimpl.h b/cpp_src/core/transactionimpl.h index afe7641f7..89baa10d9 100644 --- a/cpp_src/core/transactionimpl.h +++ b/cpp_src/core/transactionimpl.h @@ -26,7 +26,15 @@ class TransactionStep { class TransactionImpl { public: TransactionImpl(const std::string &nsName, const PayloadType &pt, const TagsMatcher &tm, const FieldsSet &pf, - std::shared_ptr schema); + std::shared_ptr schema) + : payloadType_(pt), + tagsMatcher_(tm), + pkFields_(pf), + schema_(std::move(schema)), + nsName_(nsName), + tagsUpdated_(false), + hasDeleteItemSteps_(false), + startTime_(std::chrono::high_resolution_clock::now()) {} void Insert(Item &&item); void Update(Item &&item); @@ -38,6 +46,7 @@ class TransactionImpl { void UpdateTagsMatcherFromItem(ItemImpl *ritem); Item NewItem(); Item GetItem(TransactionStep &&st); + void ValidatePK(const FieldsSet &pkFields); const std::string &GetName() { return nsName_; } @@ -51,6 +60,7 @@ class TransactionImpl { 
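The ValidatePK()/hasDeleteItemSteps_ pair above is a commit-time guard: Modify() records whether the transaction contains Delete steps, and ValidatePK() (presumably invoked with the namespace's current PK fields when the transaction is applied) rejects the transaction if that PK layout no longer matches the one captured at transaction creation, instead of deleting rows by a stale key. A simplified sketch of the same guard with stand-in types (FieldsSet reduced to a vector of field ids, std::runtime_error instead of reindexer's Error):

#include <mutex>
#include <stdexcept>
#include <vector>

using FieldsSet = std::vector<int>;  // stand-in for reindexer's FieldsSet

class TxSketch {
public:
	explicit TxSketch(FieldsSet pkAtStart) : pkFields_(std::move(pkAtStart)) {}

	void Delete(int /*item*/) {
		std::lock_guard lck(mtx_);
		hasDeleteItemSteps_ = true;  // this tx will remove rows by PK
	}

	// Called with the namespace's *current* PK fields before the tx is applied.
	void ValidatePK(const FieldsSet& currentPk) {
		std::lock_guard lck(mtx_);
		if (hasDeleteItemSteps_ && currentPk != pkFields_) {
			throw std::runtime_error("Transaction has Delete-calls and its PK metadata is outdated");
		}
	}

private:
	FieldsSet pkFields_;  // PK layout captured when the tx was created
	bool hasDeleteItemSteps_ = false;
	std::mutex mtx_;
};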
std::vector steps_; std::string nsName_; bool tagsUpdated_; + bool hasDeleteItemSteps_; std::mutex mtx_; Transaction::time_point startTime_; }; diff --git a/cpp_src/core/txstats.h b/cpp_src/core/txstats.h index f30d7e27a..30e364909 100644 --- a/cpp_src/core/txstats.h +++ b/cpp_src/core/txstats.h @@ -1,9 +1,9 @@ #pragma once #include "core/transaction.h" +#include "core/transactionimpl.h" #include "namespace/namespacestat.h" #include "perfstatcounter.h" -#include "tools/stringstools.h" namespace reindexer { diff --git a/cpp_src/core/type_consts_helpers.cc b/cpp_src/core/type_consts_helpers.cc index 18fd15e1c..60db10409 100644 --- a/cpp_src/core/type_consts_helpers.cc +++ b/cpp_src/core/type_consts_helpers.cc @@ -110,6 +110,8 @@ namespace reindexer { return "unknown"sv; } +} // namespace reindexer + [[nodiscard]] std::string_view JoinTypeName(JoinType type) { using namespace std::string_view_literals; @@ -126,5 +128,3 @@ namespace reindexer { assertrx(false); return "unknown"sv; } - -} // namespace reindexer diff --git a/cpp_src/core/type_consts_helpers.h b/cpp_src/core/type_consts_helpers.h index 2fc5e36df..d4d32897e 100644 --- a/cpp_src/core/type_consts_helpers.h +++ b/cpp_src/core/type_consts_helpers.h @@ -11,6 +11,18 @@ namespace reindexer { [[nodiscard]] std::string_view TagTypeToStr(TagType); [[nodiscard]] std::string_view AggTypeToStr(AggType t) noexcept; +constexpr bool IsComposite(IndexType type) noexcept { + return type == IndexCompositeBTree || type == IndexCompositeFastFT || type == IndexCompositeFuzzyFT || type == IndexCompositeHash; +} + +constexpr bool IsFullText(IndexType type) noexcept { + return type == IndexFastFT || type == IndexFuzzyFT || type == IndexCompositeFastFT || type == IndexCompositeFuzzyFT; +} + +constexpr bool IsFastFullText(IndexType type) noexcept { return type == IndexFastFT || type == IndexCompositeFastFT; } + +} // namespace reindexer + /// Get readable Join Type /// @param type - join type /// @returns string with join type name @@ -132,15 +144,3 @@ T& operator<<(T& os, CollateMode m) { } std::abort(); } - -constexpr bool IsComposite(IndexType type) noexcept { - return type == IndexCompositeBTree || type == IndexCompositeFastFT || type == IndexCompositeFuzzyFT || type == IndexCompositeHash; -} - -constexpr bool IsFullText(IndexType type) noexcept { - return type == IndexFastFT || type == IndexFuzzyFT || type == IndexCompositeFastFT || type == IndexCompositeFuzzyFT; -} - -constexpr bool IsFastFullText(IndexType type) noexcept { return type == IndexFastFT || type == IndexCompositeFastFT; } - -} // namespace reindexer diff --git a/cpp_src/gtests/bench/fixtures/api_tv_composite.cc b/cpp_src/gtests/bench/fixtures/api_tv_composite.cc index fdeceac96..dc131e080 100644 --- a/cpp_src/gtests/bench/fixtures/api_tv_composite.cc +++ b/cpp_src/gtests/bench/fixtures/api_tv_composite.cc @@ -299,13 +299,14 @@ void ApiTvComposite::RangeHashCompositeIntStr(benchmark::State& state) { } void ApiTvComposite::RangeTreeIntSortByHashInt(State& state) { + using namespace std::string_view_literals; AllocsTracker allocsTracker(state); for (auto _ : state) { // NOLINT(*deadcode.DeadStores) Query q(nsdef_.name); auto idRange = id_seq_->GetRandomIdRange(id_seq_->Count() * 0.02); - q.Where("id", CondRange, {idRange.first, idRange.second}).Sort("age", false).Limit(20); + q.Where("id", CondRange, {idRange.first, idRange.second}).Sort("age"sv, false).Limit(20); QueryResults qres; auto err = db_->Select(q, qres); @@ -314,13 +315,14 @@ void ApiTvComposite::RangeTreeIntSortByHashInt(State& 
state) { } void ApiTvComposite::RangeTreeIntSortByTreeInt(State& state) { + using namespace std::string_view_literals; AllocsTracker allocsTracker(state); for (auto _ : state) { // NOLINT(*deadcode.DeadStores) Query q(nsdef_.name); auto idRange = id_seq_->GetRandomIdRange(id_seq_->Count() * 0.02); - q.Where("id", CondRange, {idRange.first, idRange.second}).Sort("year", false).Limit(20); + q.Where("id"sv, CondRange, {idRange.first, idRange.second}).Sort("year"sv, false).Limit(20); QueryResults qres; auto err = db_->Select(q, qres); @@ -329,13 +331,14 @@ void ApiTvComposite::RangeTreeIntSortByTreeInt(State& state) { } void ApiTvComposite::RangeTreeStrSortByHashInt(benchmark::State& state) { + using namespace std::string_view_literals; AllocsTracker allocsTracker(state); for (auto _ : state) { // NOLINT(*deadcode.DeadStores) Query q(nsdef_.name); auto idRange = id_seq_->GetRandomIdRange(id_seq_->Count() * 0.02); - q.Where("id", CondRange, {std::to_string(idRange.first), std::to_string(idRange.second)}).Sort("age", false).Limit(20); + q.Where("id"sv, CondRange, {std::to_string(idRange.first), std::to_string(idRange.second)}).Sort("age"sv, false).Limit(20); QueryResults qres; auto err = db_->Select(q, qres); @@ -344,13 +347,14 @@ void ApiTvComposite::RangeTreeStrSortByHashInt(benchmark::State& state) { } void ApiTvComposite::RangeTreeStrSortByTreeInt(benchmark::State& state) { + using namespace std::string_view_literals; AllocsTracker allocsTracker(state); for (auto _ : state) { // NOLINT(*deadcode.DeadStores) Query q(nsdef_.name); auto idRange = id_seq_->GetRandomIdRange(id_seq_->Count() * 0.02); - q.Where("id", CondRange, {std::to_string(idRange.first), std::to_string(idRange.second)}).Sort("year", false).Limit(20); + q.Where("id"sv, CondRange, {std::to_string(idRange.first), std::to_string(idRange.second)}).Sort("year"sv, false).Limit(20); QueryResults qres; auto err = db_->Select(q, qres); @@ -359,6 +363,7 @@ void ApiTvComposite::RangeTreeStrSortByTreeInt(benchmark::State& state) { } void ApiTvComposite::RangeTreeDoubleSortByTreeInt(benchmark::State& state) { + using namespace std::string_view_literals; AllocsTracker allocsTracker(state); for (auto _ : state) { // NOLINT(*deadcode.DeadStores) Query q(nsdef_.name); @@ -366,7 +371,7 @@ void ApiTvComposite::RangeTreeDoubleSortByTreeInt(benchmark::State& state) { auto leftRate = random(0.0, 4.99); auto rightRate = random(5.0, 10.0); - q.Where("rate", CondRange, {leftRate, rightRate}).Sort("year", false).Limit(20); + q.Where("rate"sv, CondRange, {leftRate, rightRate}).Sort("year"sv, false).Limit(20); QueryResults qres; auto err = db_->Select(q, qres); @@ -375,6 +380,7 @@ void ApiTvComposite::RangeTreeDoubleSortByTreeInt(benchmark::State& state) { } void ApiTvComposite::RangeTreeDoubleSortByHashInt(benchmark::State& state) { + using namespace std::string_view_literals; AllocsTracker allocsTracker(state); for (auto _ : state) { // NOLINT(*deadcode.DeadStores) Query q(nsdef_.name); @@ -382,7 +388,7 @@ void ApiTvComposite::RangeTreeDoubleSortByHashInt(benchmark::State& state) { auto leftRate = random(0.0, 4.99); auto rightRate = random(5.0, 10.0); - q.Where("rate", CondRange, {leftRate, rightRate}).Sort("age", false).Limit(20); + q.Where("rate"sv, CondRange, {leftRate, rightRate}).Sort("age"sv, false).Limit(20); QueryResults qres; auto err = db_->Select(q, qres); @@ -391,13 +397,14 @@ void ApiTvComposite::RangeTreeDoubleSortByHashInt(benchmark::State& state) { } void ApiTvComposite::RangeTreeStrSortByHashStrCollateASCII(benchmark::State& state) { + 
using namespace std::string_view_literals; AllocsTracker allocsTracker(state); for (auto _ : state) { // NOLINT(*deadcode.DeadStores) Query q(nsdef_.name); auto idRange = id_seq_->GetRandomIdRange(id_seq_->Count() * 0.02); - q.Where("id", CondRange, {std::to_string(idRange.first), std::to_string(idRange.second)}).Sort("location", false).Limit(20); + q.Where("id"sv, CondRange, {std::to_string(idRange.first), std::to_string(idRange.second)}).Sort("location"sv, false).Limit(20); QueryResults qres; auto err = db_->Select(q, qres); @@ -406,13 +413,14 @@ void ApiTvComposite::RangeTreeStrSortByHashStrCollateASCII(benchmark::State& sta } void ApiTvComposite::RangeTreeStrSortByHashStrCollateUTF8(benchmark::State& state) { + using namespace std::string_view_literals; AllocsTracker allocsTracker(state); for (auto _ : state) { // NOLINT(*deadcode.DeadStores) Query q(nsdef_.name); auto idRange = id_seq_->GetRandomIdRange(id_seq_->Count() * 0.02); - q.Where("id", CondRange, {std::to_string(idRange.first), std::to_string(idRange.second)}).Sort("name", false).Limit(20); + q.Where("id"sv, CondRange, {std::to_string(idRange.first), std::to_string(idRange.second)}).Sort("name"sv, false).Limit(20); QueryResults qres; auto err = db_->Select(q, qres); @@ -421,11 +429,12 @@ void ApiTvComposite::RangeTreeStrSortByHashStrCollateUTF8(benchmark::State& stat } void ApiTvComposite::SortByHashInt(benchmark::State& state) { + using namespace std::string_view_literals; AllocsTracker allocsTracker(state); for (auto _ : state) { // NOLINT(*deadcode.DeadStores) Query q(nsdef_.name); - q.Sort("id", false).Limit(20); + q.Sort("id"sv, false).Limit(20); QueryResults qres; auto err = db_->Select(q, qres); @@ -434,11 +443,12 @@ void ApiTvComposite::SortByHashInt(benchmark::State& state) { } void ApiTvComposite::ForcedSortByHashInt(benchmark::State& state) { + using namespace std::string_view_literals; AllocsTracker allocsTracker(state); for (auto _ : state) { // NOLINT(*deadcode.DeadStores) Query q(nsdef_.name); - q.Sort("id", false, {10, 20, 30, 40, 50}).Limit(20); + q.Sort("id"sv, false, {10, 20, 30, 40, 50}).Limit(20); QueryResults qres; auto err = db_->Select(q, qres); @@ -447,11 +457,12 @@ void ApiTvComposite::ForcedSortByHashInt(benchmark::State& state) { } void ApiTvComposite::ForcedSortWithSecondCondition(benchmark::State& state) { + using namespace std::string_view_literals; AllocsTracker allocsTracker(state); for (auto _ : state) { // NOLINT(*deadcode.DeadStores) Query q(nsdef_.name); - q.Sort("id", false, {10, 20, 30, 40, 50}).Sort("location", false).Limit(20); + q.Sort("id"sv, false, {10, 20, 30, 40, 50}).Sort("location"sv, false).Limit(20); QueryResults qres; auto err = db_->Select(q, qres); @@ -474,11 +485,12 @@ void ApiTvComposite::Query2CondIdSetComposite(benchmark::State& state) { } void ApiTvComposite::SortByHashStrCollateASCII(benchmark::State& state) { + using namespace std::string_view_literals; AllocsTracker allocsTracker(state); for (auto _ : state) { // NOLINT(*deadcode.DeadStores) Query q(nsdef_.name); - q.Sort("location", false).Limit(20); + q.Sort("location"sv, false).Limit(20); QueryResults qres; auto err = db_->Select(q, qres); @@ -487,11 +499,12 @@ void ApiTvComposite::SortByHashStrCollateASCII(benchmark::State& state) { } void ApiTvComposite::SortByHashStrCollateUTF8(benchmark::State& state) { + using namespace std::string_view_literals; AllocsTracker allocsTracker(state); for (auto _ : state) { // NOLINT(*deadcode.DeadStores) Query q(nsdef_.name); - q.Sort("name", false).Limit(20); + 
q.Sort("name"sv, false).Limit(20); QueryResults qres; auto err = db_->Select(q, qres); @@ -500,11 +513,12 @@ void ApiTvComposite::SortByHashStrCollateUTF8(benchmark::State& state) { } void ApiTvComposite::SortByHashCompositeIntInt(benchmark::State& state) { + using namespace std::string_view_literals; AllocsTracker allocsTracker(state); for (auto _ : state) { // NOLINT(*deadcode.DeadStores) Query q(nsdef_.name); - q.Sort("id+start_time", false).Limit(20); + q.Sort("id+start_time"sv, false).Limit(20); QueryResults qres; auto err = db_->Select(q, qres); @@ -513,11 +527,12 @@ void ApiTvComposite::SortByHashCompositeIntInt(benchmark::State& state) { } void ApiTvComposite::SortByHashCompositeIntStr(benchmark::State& state) { + using namespace std::string_view_literals; AllocsTracker allocsTracker(state); for (auto _ : state) { // NOLINT(*deadcode.DeadStores) Query q(nsdef_.name); - q.Sort("id+genre", false).Limit(20); + q.Sort("id+genre"sv, false).Limit(20); QueryResults qres; auto err = db_->Select(q, qres); @@ -526,11 +541,12 @@ void ApiTvComposite::SortByHashCompositeIntStr(benchmark::State& state) { } void ApiTvComposite::SortByTreeCompositeIntInt(benchmark::State& state) { + using namespace std::string_view_literals; AllocsTracker allocsTracker(state); for (auto _ : state) { // NOLINT(*deadcode.DeadStores) Query q(nsdef_.name); - q.Sort("id+year", false).Limit(20); + q.Sort("id+year"sv, false).Limit(20); QueryResults qres; auto err = db_->Select(q, qres); @@ -539,11 +555,12 @@ void ApiTvComposite::SortByTreeCompositeIntInt(benchmark::State& state) { } void ApiTvComposite::SortByTreeCompositeIntStrCollateUTF8(benchmark::State& state) { + using namespace std::string_view_literals; AllocsTracker allocsTracker(state); for (auto _ : state) { // NOLINT(*deadcode.DeadStores) Query q(nsdef_.name); - q.Sort("id+name", false).Limit(20); + q.Sort("id+name"sv, false).Limit(20); QueryResults qres; auto err = db_->Select(q, qres); diff --git a/cpp_src/gtests/bench/fixtures/api_tv_simple.cc b/cpp_src/gtests/bench/fixtures/api_tv_simple.cc index a8b2650a1..e628e2b5f 100644 --- a/cpp_src/gtests/bench/fixtures/api_tv_simple.cc +++ b/cpp_src/gtests/bench/fixtures/api_tv_simple.cc @@ -108,7 +108,9 @@ void ApiTvSimple::RegisterAllCases() { Register("Query2CondIdSet20000", &ApiTvSimple::Query2CondIdSet20000, this); #endif // !defined(REINDEX_WITH_ASAN) && !defined(REINDEX_WITH_TSAN) && !defined(RX_WITH_STDLIB_DEBUG) Register("FromCJSON", &ApiTvSimple::FromCJSON, this); + Register("FromCJSONPKOnly", &ApiTvSimple::FromCJSONPKOnly, this); Register("GetCJSON", &ApiTvSimple::GetCJSON, this); + Register("ExtractField", &ApiTvSimple::ExtractField, this); // NOLINTEND(*cplusplus.NewDeleteLeaks) } @@ -263,12 +265,13 @@ reindexer::Error ApiTvSimple::prepareCJsonBench() { auto err = db_->AddNamespace(cjsonNsDef); if (!err.ok()) return err; + fieldsToExtract_.clear(); itemForCjsonBench_ = std::make_unique(db_->NewItem(cjsonNsName_)); if (!itemForCjsonBench_->Status().ok()) return itemForCjsonBench_->Status(); wrSer_.Reset(); reindexer::JsonBuilder bld(wrSer_); constexpr size_t len = 10; - bld.Put("id", 0); + bld.Put("id", kCjsonBenchItemID); bld.Put("bool_-_index", rand() % 2); bld.Put("int_-_index", rand()); bld.Put("int_hash_index", rand()); @@ -296,20 +299,29 @@ reindexer::Error ApiTvSimple::prepareCJsonBench() { bld.Array("string_tree_array_index", randStringArray()); for (size_t i = 0; i < 10; ++i) { const std::string i_str = std::to_string(i); + fieldsToExtract_.emplace_back("bool_field_" + i_str); 
bld.Put("bool_field_" + i_str, rand() % 2); + fieldsToExtract_.emplace_back("int_field_" + i_str); bld.Put("int_field_" + i_str, rand()); + fieldsToExtract_.emplace_back("double_field_" + i_str); bld.Put("double_field_" + i_str, rand() / double(rand() + 1)); + fieldsToExtract_.emplace_back("string_field_" + i_str); bld.Put("string_field_" + i_str, randString(len)); bld.Array("bool_array_field_" + i_str, randBoolArray()); bld.Array("int_array_field_" + i_str, randIntArray()); bld.Array("double_array_field_" + i_str, randDoubleArray()); bld.Array("string_array_field_" + i_str, randStringArray()); { - auto obj = bld.Object("nested_obj_" + i_str); + const std::string nestedBase("nested_obj_" + i_str); + auto obj = bld.Object(nestedBase); obj.Put("bool_field", rand() % 2); + fieldsToExtract_.emplace_back(nestedBase + ".bool_field"); obj.Put("int_field", rand()); + fieldsToExtract_.emplace_back(nestedBase + ".int_field"); obj.Put("double_field", rand() / double(rand() + 1)); + fieldsToExtract_.emplace_back(nestedBase + ".double_field"); obj.Put("string_field", randString(len)); + fieldsToExtract_.emplace_back(nestedBase + ".string_field"); obj.Array("bool_array_field", randBoolArray()); obj.Array("int_array_field", randIntArray()); obj.Array("double_array_field", randDoubleArray()); @@ -453,6 +465,30 @@ void ApiTvSimple::FromCJSON(benchmark::State& state) { } } +void ApiTvSimple::FromCJSONPKOnly(benchmark::State& state) { + reindexer::Item item = db_->NewItem(cjsonNsName_); + { + AllocsTracker allocsTracker(state); + for (auto _ : state) { // NOLINT(*deadcode.DeadStores) + const auto err = item.FromCJSON(cjsonOfItem_, true); + if (!err.ok()) state.SkipWithError(err.what().c_str()); + if (!item.Status().ok()) state.SkipWithError(item.Status().what().c_str()); + } + } + assertrx(item["id"].Get() == kCjsonBenchItemID); +} + +void ApiTvSimple::ExtractField(benchmark::State& state) { + assertrx(itemForCjsonBench_); + assertrx(fieldsToExtract_.size()); + AllocsTracker allocsTracker(state); + for (auto _ : state) { // NOLINT(*deadcode.DeadStores) + const auto& fieldName = fieldsToExtract_[rand() % fieldsToExtract_.size()]; + const auto va = VariantArray((*itemForCjsonBench_)[fieldName]); + if (va.size() != 1) state.SkipWithError(fmt::sprintf("Unexpected result size: %d", va.size()).c_str()); + } +} + void ApiTvSimple::StringsSelect(benchmark::State& state) { AllocsTracker allocsTracker(state); for (auto _ : state) { // NOLINT(*deadcode.DeadStores) diff --git a/cpp_src/gtests/bench/fixtures/api_tv_simple.h b/cpp_src/gtests/bench/fixtures/api_tv_simple.h index 7f1587d6b..45b9a0d4c 100644 --- a/cpp_src/gtests/bench/fixtures/api_tv_simple.h +++ b/cpp_src/gtests/bench/fixtures/api_tv_simple.h @@ -80,7 +80,9 @@ class ApiTvSimple : private BaseFixture { void Query4CondRangeTotal(State& state); void Query4CondRangeCachedTotal(State& state); void FromCJSON(State&); + void FromCJSONPKOnly(State&); void GetCJSON(State&); + void ExtractField(State&); void query2CondIdSet(State& state, const std::vector>& idsets); reindexer::Error prepareCJsonBench(); @@ -108,5 +110,7 @@ class ApiTvSimple : private BaseFixture { std::string innerJoinLowSelectivityRightNs_{"inner_join_low_selectivity_right_ns"}; std::string cjsonNsName_{"cjson_ns_name"}; std::unique_ptr itemForCjsonBench_; + std::vector fieldsToExtract_; + constexpr static int kCjsonBenchItemID = 9973; std::string cjsonOfItem_; }; diff --git a/cpp_src/gtests/bench/fixtures/ft_fixture.cc b/cpp_src/gtests/bench/fixtures/ft_fixture.cc index b02143761..955ee9d66 100644 
--- a/cpp_src/gtests/bench/fixtures/ft_fixture.cc +++ b/cpp_src/gtests/bench/fixtures/ft_fixture.cc @@ -7,6 +7,7 @@ #include "core/cjson/jsonbuilder.h" #include "core/ft/config/ftfastconfig.h" +#include "tools/errors.h" #include "tools/stringstools.h" #include @@ -241,18 +242,33 @@ reindexer::Item FullText::MakeItem(benchmark::State&) { void FullText::BuildInsertSteps(State& state) { AllocsTracker allocsTracker(state, printFlags); - db_->DropNamespace(nsdef_.name); + auto err = db_->DropNamespace(nsdef_.name); + if (!err.ok()) { + state.SkipWithError(err.what().c_str()); + assertf(err.ok(), "%s", err.what()); + } id_seq_->Reset(); - auto err = BaseFixture::Initialize(); + err = BaseFixture::Initialize(); + if (!err.ok()) { + state.SkipWithError(err.what().c_str()); + assertf(err.ok(), "%s", err.what()); + } size_t i = 0; size_t mem = 0; + assert(!words_.empty()); for (auto _ : state) { // NOLINT(*deadcode.DeadStores) auto item = MakeSpecialItem(); - if (!item.Status().ok()) state.SkipWithError(item.Status().what().c_str()); + if (!item.Status().ok()) { + state.SkipWithError(item.Status().what().c_str()); + assertf(item.Status().ok(), "%s", item.Status().what()); + } err = db_->Insert(nsdef_.name, item); - if (!err.ok()) state.SkipWithError(err.what().c_str()); + if (!err.ok()) { + state.SkipWithError(err.what().c_str()); + assertf(err.ok(), "%s", err.what()); + } if (i % 12000 == 0) { Query q(nsdef_.name); @@ -279,7 +295,10 @@ void FullText::Insert(State& state) { AllocsTracker allocsTracker(state, printFlags); for (auto _ : state) { // NOLINT(*deadcode.DeadStores) auto item = MakeItem(state); - if (!item.Status().ok()) state.SkipWithError(item.Status().what().c_str()); + if (!item.Status().ok()) { + state.SkipWithError(item.Status().what().c_str()); + continue; + } auto err = db_->Insert(nsdef_.name, item); if (!err.ok()) state.SkipWithError(err.what().c_str()); @@ -290,10 +309,11 @@ void FullText::Insert(State& state) { } void FullText::BuildCommonIndexes(benchmark::State& state) { + using namespace std::string_view_literals; AllocsTracker allocsTracker(state, printFlags); for (auto _ : state) { // NOLINT(*deadcode.DeadStores) Query q(nsdef_.name); - q.Where("year", CondRange, {2010, 2016}).Limit(20).Sort("year", false); + q.Where("year"sv, CondRange, {2010, 2016}).Limit(20).Sort("year"sv, false); std::this_thread::sleep_for(std::chrono::milliseconds(2000)); QueryResults qres; @@ -325,7 +345,10 @@ void FullText::BuildAndInsertLowWordsDiversityNs(State& state) { item["description1"] = d1; item["description2"] = d2; - if (!item.Status().ok()) state.SkipWithError(item.Status().what().c_str()); + if (!item.Status().ok()) { + state.SkipWithError(item.Status().what().c_str()); + continue; + } auto err = db_->Insert(lowWordsDiversityNsDef_.name, item); if (!err.ok()) state.SkipWithError(err.what().c_str()); @@ -908,7 +931,13 @@ void FullText::InitForAlternatingUpdatesAndSelects(State& state) { ftCfg.optimization = opt; ftIndexOpts.config = ftCfg.GetJson({}); AllocsTracker allocsTracker(state, printFlags); - db_->DropNamespace(alternatingNs_); + auto err = db_->DropNamespace(alternatingNs_); + if (!err.ok()) { + if (err.code() != errNotFound || err.what() != "Namespace '" + alternatingNs_ + "' does not exist") { + state.SkipWithError(err.what().c_str()); + assertf(err.ok(), "%s", err.what()); + } + } for (auto _ : state) { // NOLINT(*deadcode.DeadStores) NamespaceDef nsDef{alternatingNs_}; nsDef.AddIndex("id", "hash", "int", IndexOpts().PK()) @@ -918,7 +947,10 @@ void 
FullText::InitForAlternatingUpdatesAndSelects(State& state) { .AddIndex("search_comp", {"search1", "search2"}, "text", "composite", ftIndexOpts) .AddIndex("search_comp_not_index_fields", {"field1", "field2"}, "text", "composite", ftIndexOpts); auto err = db_->AddNamespace(nsDef); - if (!err.ok()) state.SkipWithError(err.what().c_str()); + if (!err.ok()) { + state.SkipWithError(err.what().c_str()); + assertf(err.ok(), "%s", err.what()); + } values_.clear(); values_.reserve(kNsSize); reindexer::WrSerializer ser; @@ -934,18 +966,25 @@ void FullText::InitForAlternatingUpdatesAndSelects(State& state) { bld.Put("rand", rand()); bld.End(); auto item = db_->NewItem(alternatingNs_); - if (!item.Status().ok()) state.SkipWithError(item.Status().what().c_str()); + if (!item.Status().ok()) { + state.SkipWithError(item.Status().what().c_str()); + continue; + } err = item.FromJSON(ser.Slice()); - if (!err.ok()) state.SkipWithError(err.what().c_str()); + if (!err.ok()) { + state.SkipWithError(err.what().c_str()); + continue; + } err = db_->Insert(alternatingNs_, item); if (!err.ok()) state.SkipWithError(err.what().c_str()); } } - auto err = db_->Commit(alternatingNs_); + err = db_->Commit(alternatingNs_); if (!err.ok()) state.SkipWithError(err.what().c_str()); // Init index build + assert(!values_.empty()); Query q = Query(alternatingNs_) .Where("search1", CondEq, @@ -970,6 +1009,7 @@ void FullText::InitForAlternatingUpdatesAndSelects(State& state) { void FullText::updateAlternatingNs(reindexer::WrSerializer& ser, benchmark::State& state) { using namespace std::string_literals; + assert(!values_.empty()); const int i = randomGenerator_(randomEngine_, std::uniform_int_distribution::param_type{0, int(values_.size() - 1)}); ser.Reset(); reindexer::JsonBuilder bld(ser); @@ -982,9 +1022,15 @@ void FullText::updateAlternatingNs(reindexer::WrSerializer& ser, benchmark::Stat bld.End(); auto item = db_->NewItem(alternatingNs_); item.Unsafe(false); - if (!item.Status().ok()) state.SkipWithError(item.Status().what().c_str()); + if (!item.Status().ok()) { + state.SkipWithError(item.Status().what().c_str()); + return; + } auto err = item.FromJSON(ser.Slice()); - if (!err.ok()) state.SkipWithError(err.what().c_str()); + if (!err.ok()) { + state.SkipWithError(err.what().c_str()); + return; + } err = db_->Update(alternatingNs_, item); if (!err.ok()) state.SkipWithError(err.what().c_str()); @@ -997,6 +1043,7 @@ void FullText::updateAlternatingNs(reindexer::WrSerializer& ser, benchmark::Stat } void FullText::AlternatingUpdatesAndSelects(benchmark::State& state) { + assert(!values_.empty()); reindexer::WrSerializer ser; AllocsTracker allocsTracker(state, printFlags); for (auto _ : state) { // NOLINT(*deadcode.DeadStores) @@ -1015,6 +1062,7 @@ void FullText::AlternatingUpdatesAndSelects(benchmark::State& state) { } void FullText::AlternatingUpdatesAndSelectsByComposite(benchmark::State& state) { + assert(!values_.empty()); reindexer::WrSerializer ser; AllocsTracker allocsTracker(state, printFlags); for (auto _ : state) { // NOLINT(*deadcode.DeadStores) @@ -1030,6 +1078,7 @@ void FullText::AlternatingUpdatesAndSelectsByComposite(benchmark::State& state) } void FullText::AlternatingUpdatesAndSelectsByCompositeByNotIndexFields(benchmark::State& state) { + assert(!values_.empty()); reindexer::WrSerializer ser; AllocsTracker allocsTracker(state, printFlags); for (auto _ : state) { // NOLINT(*deadcode.DeadStores) diff --git a/cpp_src/gtests/bench/fixtures/ft_fixture.h b/cpp_src/gtests/bench/fixtures/ft_fixture.h index 
309c0de75..13f69eb43 100644
--- a/cpp_src/gtests/bench/fixtures/ft_fixture.h
+++ b/cpp_src/gtests/bench/fixtures/ft_fixture.h
@@ -200,7 +200,7 @@ class FullText : private BaseFixture {
 	void updateAlternatingNs(reindexer::WrSerializer&, benchmark::State&);
 	reindexer::Error readDictFile(const std::string& fileName, std::vector<std::string>& words);
-	const char* alternatingNs_ = "FtAlternatingUpdatesAndSelects";
+	const std::string alternatingNs_ = "FtAlternatingUpdatesAndSelects";
 	size_t raw_data_sz_ = 0;
 	std::mt19937 randomEngine_{1};
diff --git a/cpp_src/gtests/bench/ft_bench.cc b/cpp_src/gtests/bench/ft_bench.cc
index 2358cd617..05b81d1de 100644
--- a/cpp_src/gtests/bench/ft_bench.cc
+++ b/cpp_src/gtests/bench/ft_bench.cc
@@ -30,11 +30,12 @@ int main(int argc, char** argv) {
 	}
 	shared_ptr<Reindexer> DB = std::make_shared<Reindexer>();
-	DB->Connect("builtin://" + kStoragePath);
+	auto err = DB->Connect("builtin://" + kStoragePath);
+	if (!err.ok()) return err.code();
 	FullText ft(DB.get(), "fulltext", kItemsInBenchDataset);
-	auto err = ft.Initialize();
+	err = ft.Initialize();
 	if (!err.ok()) return err.code();
 	::benchmark::Initialize(&argc, argv);
diff --git a/cpp_src/gtests/bench/reindexer_bench.cc b/cpp_src/gtests/bench/reindexer_bench.cc
index 2053b0d1f..216a8d42a 100644
--- a/cpp_src/gtests/bench/reindexer_bench.cc
+++ b/cpp_src/gtests/bench/reindexer_bench.cc
@@ -39,7 +39,8 @@ int main(int argc, char** argv) {
 	}
 	shared_ptr<Reindexer> DB = std::make_shared<Reindexer>();
-	DB->Connect("builtin://" + kStoragePath);
+	auto err = DB->Connect("builtin://" + kStoragePath);
+	if (!err.ok()) return err.code();
 	JoinItems joinItems(DB.get(), 500);
 	ApiTvSimple apiTvSimple(DB.get(), "ApiTvSimple", kItemsInBenchDataset);
@@ -48,7 +49,7 @@ int main(int argc, char** argv) {
 	Geometry geometry(DB.get(), "Geometry", kItemsInBenchDataset);
 	Aggregation aggregation(DB.get(), "Aggregation", kItemsInBenchDataset);
-	auto err = apiTvSimple.Initialize();
+	err = apiTvSimple.Initialize();
 	if (!err.ok()) return err.code();
 	err = apiTvSimpleComparators.Initialize();
diff --git a/cpp_src/gtests/tests/API/base_tests.cc b/cpp_src/gtests/tests/API/base_tests.cc
index a466ceb28..b6e72f672 100644
--- a/cpp_src/gtests/tests/API/base_tests.cc
+++ b/cpp_src/gtests/tests/API/base_tests.cc
@@ -89,11 +89,14 @@ TEST_F(ReindexerApi, RenameNamespace) {
 	auto getRowsInJSON = [&](const std::string& namespaceName, std::vector<std::string>& resStrings) {
 		QueryResults result;
-		rt.reindexer->Select(Query(namespaceName), result);
+		auto err = rt.reindexer->Select(Query(namespaceName), result);
+		ASSERT_TRUE(err.ok()) << err.what();
 		resStrings.clear();
 		for (auto it = result.begin(); it != result.end(); ++it) {
+			ASSERT_TRUE(it.Status().ok()) << it.Status().what();
 			reindexer::WrSerializer sr;
-			it.GetJSON(sr, false);
+			err = it.GetJSON(sr, false);
+			ASSERT_TRUE(err.ok()) << err.what();
 			std::string_view sv = sr.Slice();
 			resStrings.emplace_back(sv.data(), sv.size());
 		}
@@ -246,8 +249,7 @@ TEST_F(ReindexerApi, DistinctCompositeIndex) {
 		EXPECT_TRUE(err.ok()) << err.what();
 	}
-	Query q;
-	q._namespace = default_namespace;
+	Query q{default_namespace};
 	q.Distinct("v1+v2");
 	{
 		QueryResults qr;
@@ -1333,15 +1335,15 @@ TEST_F(ReindexerApi, DistinctQueriesEncodingTest) {
 	std::string dsl = q1.GetJSON();
 	Query q2;
 	q2.FromJSON(dsl);
-	EXPECT_TRUE(q1 == q2);
+	EXPECT_EQ(q1, q2) << "q1: " << q1.GetSQL() << "\nq2: " << q2.GetSQL();
 	Query q3{Query(default_namespace).Distinct("name").Distinct("city").Where("id", CondGt, static_cast(10))};
 	std::string sql2 = q3.GetSQL();
 	Query q4;
 	q4.FromSQL(sql2);
-	EXPECT_TRUE(q3 == q4);
- EXPECT_TRUE(sql2 == q4.GetSQL()); + EXPECT_EQ(q3, q4) << "q3: " << q3.GetSQL() << "\nq4: " << q4.GetSQL(); + EXPECT_EQ(sql2, q4.GetSQL()); } TEST_F(ReindexerApi, ContextCancelingTest) { diff --git a/cpp_src/gtests/tests/fixtures/ft_api.cc b/cpp_src/gtests/tests/fixtures/ft_api.cc index d4668d665..9f5c9ac81 100644 --- a/cpp_src/gtests/tests/fixtures/ft_api.cc +++ b/cpp_src/gtests/tests/fixtures/ft_api.cc @@ -61,7 +61,10 @@ reindexer::Error FTApi::SetFTConfig(const reindexer::FtFastConfig& ftCfg, const fieldsMap.emplace(fields[i], i); } std::vector nses; - rt.reindexer->EnumNamespaces(nses, reindexer::EnumNamespacesOpts().WithFilter(ns)); + auto err = rt.reindexer->EnumNamespaces(nses, reindexer::EnumNamespacesOpts().WithFilter(ns)); + if (!err.ok()) { + return err; + } const auto it = std::find_if(nses[0].indexes.begin(), nses[0].indexes.end(), [&index](const reindexer::IndexDef& idef) { return idef.name_ == index; }); it->opts_.SetConfig(ftCfg.GetJson(fieldsMap)); @@ -174,11 +177,11 @@ reindexer::QueryResults FTApi::SimpleSelect3(std::string word) { return res; } -void FTApi::Delete(int id) { +reindexer::Error FTApi::Delete(int id) { reindexer::Item item = rt.NewItem("nm1"); item["id"] = id; - this->rt.reindexer->Delete("nm1", item); + return this->rt.reindexer->Delete("nm1", item); } reindexer::QueryResults FTApi::SimpleCompositeSelect(std::string word) { diff --git a/cpp_src/gtests/tests/fixtures/ft_api.h b/cpp_src/gtests/tests/fixtures/ft_api.h index 5e56bf8b3..542630dec 100644 --- a/cpp_src/gtests/tests/fixtures/ft_api.h +++ b/cpp_src/gtests/tests/fixtures/ft_api.h @@ -32,7 +32,7 @@ class FTApi : public ::testing::TestWithParam& indexes) const { + const FieldType fldType = std::visit( + reindexer::overloaded{[](const Child& c) noexcept { return c.type; }, [](const Children&) noexcept { return FieldType::Struct; }}, + content_); IndexOpts opts; - const bool pk = rnd.PkIndex(isPk); + const bool pk = rnd.PkIndex(isPk_); opts.PK(pk); - opts.Array(rnd.RndArrayField(isArray)); - opts.Sparse(rnd.SparseIndex(pk)); + opts.Array(rnd.RndArrayField(isArray_) == IsArray::Yes); + opts.Sparse(rnd.RndSparseIndex(isSparse_)); opts.Dense(rnd.DenseIndex()); opts.RTreeType(static_cast(rnd.RndInt(IndexOpts::Linear, IndexOpts::RStar))); - FieldType fldType = std::visit( - reindexer::overloaded{[](const Child& c) noexcept { return c.type; }, [](const Children&) noexcept { return FieldType::Struct; }}, - content); std::string fieldType = rnd.IndexFieldType(fldType); - std::string indexType = rnd.RndIndexType(fldType, isPk); + std::string indexType{ToText(rnd.RndIndexType(type_))}; reindexer::JsonPaths jsonPaths; std::visit(reindexer::overloaded{[&](const Child& c) { jsonPaths.push_back(scheme.GetJsonPath(c.fieldPath)); }, [&](const Children& c) { + jsonPaths.reserve(c.size()); for (const auto& child : c) { + if (rnd.RndBool(0.5)) { + std::vector scalarIndexes; + scalarIndexes.reserve(indexes.size()); + for (size_t i = 0, s = indexes.size(); i < s; ++i) { + if (const auto* c = std::get_if(&indexes[i].content_); + c && c->fieldPath == child.fieldPath) { + scalarIndexes.push_back(i); + } + } + if (!scalarIndexes.empty()) { + jsonPaths.push_back(indexes[rnd.RndWhich(scalarIndexes)].name_); + continue; + } + } jsonPaths.push_back(scheme.GetJsonPath(child.fieldPath)); } }}, - content); - return {name, std::move(jsonPaths), std::move(indexType), std::move(fieldType), std::move(opts), rnd.ExpiredIndex()}; + content_); + return {name_, std::move(jsonPaths), std::move(indexType), std::move(fieldType), 
std::move(opts), rnd.ExpiredIndex()}; } void Index::Dump(std::ostream& os, const NsScheme& scheme, size_t offset) const { for (size_t i = 0; i < offset; ++i) os << " "; os << "{\n"; for (size_t i = 0; i <= offset; ++i) os << " "; - os << "name: " << name << '\n'; + os << "name: " << name_ << '\n'; + for (size_t i = 0; i <= offset; ++i) os << " "; + os << "type: " << type_ << '\n'; for (size_t i = 0; i <= offset; ++i) os << " "; - os << "pk: " << (isPk ? "true" : "false") << '\n'; + os << "pk: " << std::boolalpha << isPk_ << '\n'; for (size_t i = 0; i <= offset; ++i) os << " "; - os << "array: " << (isArray ? "true" : "false") << '\n'; + os << "array: " << std::boolalpha << IsArray() << '\n'; + for (size_t i = 0; i <= offset; ++i) os << " "; + os << "sparse: " << std::boolalpha << (IsSparse() == IsSparse::Yes) << '\n'; for (size_t i = 0; i <= offset; ++i) os << " "; std::visit(reindexer::overloaded{[&](const Child& child) { + os << "composite: false\n"; + for (size_t i = 0; i <= offset; ++i) os << " "; os << "field: {\n"; for (size_t i = 0; i < offset + 2; ++i) os << " "; os << "type: " << child.type << '\n'; @@ -48,6 +72,8 @@ void Index::Dump(std::ostream& os, const NsScheme& scheme, size_t offset) const os << "}\n"; }, [&](const Children& children) { + os << "composite: true\n"; + for (size_t i = 0; i <= offset; ++i) os << " "; os << "fields: [\n"; for (const auto& c : children) { for (size_t i = 0; i < offset + 2; ++i) os << " "; @@ -62,7 +88,7 @@ void Index::Dump(std::ostream& os, const NsScheme& scheme, size_t offset) const for (size_t i = 0; i <= offset; ++i) os << " "; os << "]\n"; }}, - content); + content_); } } // namespace fuzzing diff --git a/cpp_src/gtests/tests/fixtures/fuzzing/index.h b/cpp_src/gtests/tests/fixtures/fuzzing/index.h index 1daeb9266..80407211c 100644 --- a/cpp_src/gtests/tests/fixtures/fuzzing/index.h +++ b/cpp_src/gtests/tests/fixtures/fuzzing/index.h @@ -1,7 +1,9 @@ #pragma once -#include "ns_scheme.h" -#include "random_generator.h" +#include +#include +#include +#include "types.h" namespace reindexer { struct IndexDef; @@ -9,20 +11,43 @@ struct IndexDef; namespace fuzzing { -struct Index { - reindexer::IndexDef IndexDef(RandomGenerator&, const NsScheme&) const; +class RandomGenerator; +class NsScheme; +class Index { +public: struct Child { FieldType type; FieldPath fieldPath; }; using Children = std::vector; - std::string name; - std::variant content; - bool isPk{false}; - bool isArray{false}; + Index(std::string name, IndexType type, IsArray isArray, IsSparse isSparse, Children content) noexcept + : name_{std::move(name)}, type_{type}, content_{std::move(content)}, isArray_{isArray}, isSparse_{isSparse} {} + Index(std::string name, IndexType type, IsArray isArray, IsSparse isSparse, Child content) noexcept + : name_{std::move(name)}, type_{type}, content_{std::move(content)}, isArray_{isArray}, isSparse_{isSparse} {} + + const std::string& Name() const& noexcept { return name_; } + const std::string& Name() const&& = delete; + IndexType Type() const noexcept { return type_; } + const auto& Content() const& noexcept { return content_; } + const auto& Content() const&& = delete; + bool IsPk() const noexcept { return isPk_; } + void SetPk() noexcept { isPk_ = true; } + bool IsArray() const noexcept { return isArray_ == IsArray::Yes; } + auto IsSparse() const noexcept { return isSparse_; } + + reindexer::IndexDef IndexDef(RandomGenerator&, const NsScheme&, const std::vector&) const; + void Dump(std::ostream&, const NsScheme&, size_t offset) const; + +private: + 
std::string name_; + IndexType type_; + std::variant content_; + bool isPk_{false}; + enum IsArray isArray_ { IsArray::No }; + enum IsSparse isSparse_ { IsSparse::No }; }; } // namespace fuzzing diff --git a/cpp_src/gtests/tests/fixtures/fuzzing/ns.cc b/cpp_src/gtests/tests/fixtures/fuzzing/ns.cc index 93d5f16fc..5a520d4a0 100644 --- a/cpp_src/gtests/tests/fixtures/fuzzing/ns.cc +++ b/cpp_src/gtests/tests/fixtures/fuzzing/ns.cc @@ -1,5 +1,8 @@ #include "ns.h" #include +#include "estl/overloaded.h" +#include "index.h" +#include "tools/assertrx.h" struct FieldPathHash { size_t operator()(const fuzzing::FieldPath& fp) const noexcept { @@ -30,47 +33,74 @@ static bool availablePkFieldType(FieldType ft) { } } -Ns::Ns(std::string name, std::ostream& os, RandomGenerator::ErrFactorType errorFactor) - : name_{std::move(name)}, rndGen_{os, errorFactor}, scheme_{name_, rndGen_} { - std::unordered_set generatedNames; +static bool availablePkIndexType(IndexType it) { + switch (it) { + case IndexType::Store: + case IndexType::FastFT: + case IndexType::FuzzyFT: + case IndexType::RTree: + return false; + case IndexType::Hash: + case IndexType::Tree: + case IndexType::Ttl: + return true; + default: + assertrx(false); + std::abort(); + } +} + +Ns::Ns(std::string name, RandomGenerator::ErrFactorType errorFactor) + : name_{std::move(name)}, rndGen_{errorFactor}, scheme_{name_, rndGen_} { + std::unordered_set usedIndexNames; std::unordered_set usedPaths; constexpr static size_t kMaxTries = 10; const size_t idxCount = rndGen_.IndexesCount(); const bool withErr = rndGen_.RndErr(); indexes_.reserve(idxCount); + std::vector scalarIndexes; + scalarIndexes.reserve(idxCount); for (size_t i = 0; i < idxCount; ++i) { const bool uniqueName = rndGen_.UniqueName(); - if (rndGen_.CompositeIndex()) { - bool fail = false; - Index index{uniqueName ? rndGen_.IndexName(generatedNames) : std::string{}, Index::Children{}}; - auto& children = std::get(index.content); - const size_t size = rndGen_.CompositeIndexSize(); - children.reserve(size); - for (size_t i = 0; i < size; ++i) { - auto fldPath = rndGen_.RndScalarField(scheme_); - FieldType fldType; - if (scheme_.IsStruct(fldPath)) { - if (!rndGen_.RndErr()) { - fail = true; - break; - } - fldType = rndGen_.RndFieldType(); + if (rndGen_.CompositeIndex(scalarIndexes.size())) { + bool array = false; + bool containsUuid = false; + std::string name; + Index::Children children; + const auto fields = rndGen_.RndFieldsForCompositeIndex(scalarIndexes); + children.reserve(fields.size()); + for (size_t f : fields) { + Index::Child fieldData; + if (f < indexes_.size()) { + const auto& idx = indexes_[f]; + fieldData = std::get(idx.Content()); + array |= idx.IsArray(); } else { - fldType = scheme_.GetFieldType(fldPath); - } - if (!uniqueName) { - if (!index.name.empty()) index.name += '+'; - if (fldPath.empty()) { - index.name += rndGen_.FieldName(generatedNames); + fieldData.fieldPath = rndGen_.RndScalarField(scheme_); + if (scheme_.IsStruct(fieldData.fieldPath)) { + fieldData.type = rndGen_.RndFieldType(); } else { - index.name += scheme_.GetJsonPath(fldPath); + fieldData.type = scheme_.GetFieldType(fieldData.fieldPath); } } - children.emplace_back(Index::Child{fldType, std::move(fldPath)}); + if (!uniqueName) { + if (!name.empty()) name += '+'; + name += scheme_.GetJsonPath(fieldData.fieldPath); + } + containsUuid |= fieldData.type == FieldType::Uuid; + children.emplace_back(std::move(fieldData)); + } + const auto indexType = + containsUuid ? 
rndGen_.RndIndexType({FieldType::Struct, FieldType::Uuid}) : rndGen_.RndIndexType({FieldType::Struct}); + if (uniqueName) { + name = rndGen_.IndexName(usedIndexNames); + } else if (!usedIndexNames.insert(name).second) { + name = rndGen_.IndexName(usedIndexNames); + usedIndexNames.insert(name); } - if (fail) continue; - index.isArray = rndGen_.RndArrayField(false); - indexes_.emplace_back(std::move(index)); + + indexes_.emplace_back(std::move(name), indexType, rndGen_.RndArrayField(array ? IsArray::Yes : IsArray::No), IsSparse::No, + std::move(children)); } else { FieldPath fldPath; size_t tryCounts = 0; @@ -82,47 +112,73 @@ Ns::Ns(std::string name, std::ostream& os, RandomGenerator::ErrFactorType errorF if (scheme_.IsStruct(fldPath)) { if (!rndGen_.RndErr()) continue; const auto fldType = rndGen_.RndFieldType(); - indexes_.emplace_back(Index{rndGen_.IndexName(generatedNames), Index::Child{fldType, std::move(fldPath)}}); + indexes_.emplace_back(rndGen_.IndexName(usedIndexNames), rndGen_.RndIndexType({fldType}), + rndGen_.RndBool(0.5) ? IsArray::Yes : IsArray::No, + rndGen_.RndBool(0.5) ? IsSparse::Yes : IsSparse::No, Index::Child{fldType, std::move(fldPath)}); } else { const auto fldType = scheme_.GetFieldType(fldPath); - const bool isArray = scheme_.IsArray(fldPath); - std::string idxName = uniqueName ? rndGen_.IndexName(generatedNames) : scheme_.GetJsonPath(fldPath); - indexes_.emplace_back(Index{std::move(idxName), Index::Child{fldType, std::move(fldPath)}}); - indexes_.back().isArray = rndGen_.RndArrayField(isArray); + const auto isArray = scheme_.IsArray(fldPath); + std::string idxName; + if (uniqueName) { + idxName = rndGen_.IndexName(usedIndexNames); + } else { + idxName = scheme_.GetJsonPath(fldPath); + if (!usedIndexNames.insert(idxName).second) { + idxName = rndGen_.IndexName(usedIndexNames); + usedIndexNames.insert(idxName); + } + } + indexes_.emplace_back(std::move(idxName), rndGen_.RndIndexType({fldType}), rndGen_.RndArrayField(isArray), + rndGen_.RndSparseIndex(fldType), Index::Child{fldType, std::move(fldPath)}); + } + if (const auto& idx = indexes_.back(); + !idx.IsArray() && idx.IsSparse() == IsSparse::No && + std::get(idx.Content()).type != FieldType::Point) { // TODO remove point check after #1352 + scalarIndexes.push_back(indexes_.size() - 1); } } } + if (rndGen_.RndErr()) { + // Do not set PK index + return; + } std::vector ii; for (size_t i = 0, s = indexes_.size(); i < s; ++i) { const auto& idx = indexes_[i]; - if (!idx.isArray && - (std::holds_alternative(idx.content) || availablePkFieldType(std::get(idx.content).type))) { + if (!idx.IsArray() && idx.IsSparse() == IsSparse::No && availablePkIndexType(idx.Type()) && + (std::holds_alternative(idx.Content()) || availablePkFieldType(std::get(idx.Content()).type))) { ii.push_back(i); } } if (ii.empty()) { - if (!rndGen_.RndErr()) { - auto path = scheme_.AddRndPkField(rndGen_); - const auto fldType = scheme_.GetFieldType(path); - std::string name = rndGen_.UniqueName() ? 
rndGen_.IndexName(generatedNames) : scheme_.GetJsonPath(path); - indexes_.emplace_back(Index{std::move(name), Index::Child{fldType, std::move(path)}}); - indexes_.back().isArray = false; - indexes_.back().isPk = true; + auto path = scheme_.AddRndPkField(rndGen_); + const auto fldType = scheme_.GetFieldType(path); + std::string name; + if (rndGen_.UniqueName()) { + name = rndGen_.IndexName(usedIndexNames); + } else { + name = scheme_.GetJsonPath(path); + if (!usedIndexNames.insert(name).second) { + name = rndGen_.IndexName(usedIndexNames); + usedIndexNames.insert(name); + } } + indexes_.emplace_back(std::move(name), rndGen_.RndPkIndexType({fldType}), IsArray::No, IsSparse::No, + Index::Child{fldType, std::move(path)}); + indexes_.back().SetPk(); } else { - indexes_[rndGen_.RndWhich(ii)].isPk = true; + indexes_[rndGen_.RndWhich(ii)].SetPk(); } } -void Ns::AddIndex(Index& index, bool isSparse) { - if (isSparse) return; - std::visit(reindexer::overloaded{[&](const Index::Child& c) { scheme_.AddIndex(c.fieldPath, isSparse); }, +void Ns::AddIndexToScheme(const Index& index, size_t indexNumber) { + std::visit(reindexer::overloaded{[&](const Index::Child& c) { scheme_.AddIndex(c.fieldPath, indexNumber, index.IsSparse()); }, [&](const Index::Children& c) { for (const auto& child : c) { - scheme_.AddIndex(child.fieldPath, isSparse); + scheme_.AddIndex(child.fieldPath, indexNumber, index.IsSparse()); } }}, - index.content); + index.Content()); } void Ns::Dump(std::ostream& os) const { diff --git a/cpp_src/gtests/tests/fixtures/fuzzing/ns.h b/cpp_src/gtests/tests/fixtures/fuzzing/ns.h index 53baf5a3e..6c93abb46 100644 --- a/cpp_src/gtests/tests/fixtures/fuzzing/ns.h +++ b/cpp_src/gtests/tests/fixtures/fuzzing/ns.h @@ -1,21 +1,23 @@ #pragma once -#include "index.h" #include "ns_scheme.h" #include "random_generator.h" namespace fuzzing { +class Index; + class Ns { public: - Ns(std::string name, std::ostream&, RandomGenerator::ErrFactorType errorFactor); - std::vector& GetIndexes() noexcept { return indexes_; } + Ns(std::string name, RandomGenerator::ErrFactorType errorFactor); + const std::vector& GetIndexes() const& noexcept { return indexes_; } + std::vector& GetIndexes() & noexcept { return indexes_; } + const std::vector& GetIndexes() const&& = delete; const std::string& GetName() const noexcept { return name_; } const NsScheme& GetScheme() const noexcept { return scheme_; } RandomGenerator& GetRandomGenerator() noexcept { return rndGen_; } - void AddIndex(Index&, bool isSparse); - void NewItem(reindexer::WrSerializer& ser) { scheme_.NewItem(ser, rndGen_); } - const std::vector& GetIndexes() const noexcept { return indexes_; } + void AddIndexToScheme(const Index&, size_t indexNumber); + void NewItem(reindexer::WrSerializer& ser) { scheme_.NewItem(ser, rndGen_, indexes_); } void Dump(std::ostream&) const; private: diff --git a/cpp_src/gtests/tests/fixtures/fuzzing/ns_scheme.cc b/cpp_src/gtests/tests/fixtures/fuzzing/ns_scheme.cc index 8fb4e9e52..8c027dbc6 100644 --- a/cpp_src/gtests/tests/fixtures/fuzzing/ns_scheme.cc +++ b/cpp_src/gtests/tests/fixtures/fuzzing/ns_scheme.cc @@ -1,10 +1,12 @@ #include "ns_scheme.h" #include "core/cjson/jsonbuilder.h" +#include "index.h" +#include "random_generator.h" #include "tools/serializer.h" namespace fuzzing { -void NsScheme::NewItem(reindexer::WrSerializer& ser, RandomGenerator& rnd) { +void NsScheme::NewItem(reindexer::WrSerializer& ser, RandomGenerator& rnd, const std::vector& indexes) { ser.Reset(); if (rnd.RndErr()) { enum Err : uint8_t { Zero, Random, END 
= Random }; @@ -23,10 +25,195 @@ void NsScheme::NewItem(reindexer::WrSerializer& ser, RandomGenerator& rnd) { } } reindexer::JsonBuilder builder{ser}; - toJson(builder, std::get(ns_.content), rnd); + toJson(builder, std::get(ns_.content), rnd, indexes); } -void NsScheme::rndValueToJson(reindexer::JsonBuilder& builder, FieldType ft, std::string_view name, RandomGenerator& rnd) { +bool NsScheme::IsStruct(const FieldPath& path) const noexcept { + if (path.empty()) return true; + const Node::Children& ref = findLastContainer(path); + assertrx(ref.size() > path.back()); + return std::holds_alternative(ref[path.back()].content); +} + +bool NsScheme::IsPoint(const FieldPath& path) const noexcept { + if (path.empty()) return false; + const Node::Children& ref = findLastContainer(path); + assertrx(ref.size() > path.back()); + return !std::holds_alternative(ref[path.back()].content) && + std::get(ref[path.back()].content).type == FieldType::Point; +} + +bool NsScheme::isTtl(const std::vector& idxNumbers, const std::vector& indexes) noexcept { + for (size_t idx : idxNumbers) { + assertrx(idx < indexes.size()); + if (indexes[idx].Type() == IndexType::Ttl) { + return true; + } + } + return false; +} + +bool NsScheme::IsTtl(const FieldPath& path, const std::vector& indexes) const noexcept { + if (path.empty()) return false; + const Node::Children& ref = findLastContainer(path); + assertrx(ref.size() > path.back()); + if (std::holds_alternative(ref[path.back()].content)) { + return false; + } + return isTtl(std::get(ref[path.back()].content).indexes, indexes); +} + +size_t NsScheme::FieldsCount(const FieldPath& path) const noexcept { + if (path.empty()) { + return std::get(ns_.content).size(); + } + const Node::Children& ref = findLastContainer(path); + assertrx(ref.size() > path.back()); + return std::visit(reindexer::overloaded{[](const Node::Child&) noexcept -> size_t { + assertrx(false); + return 0; + }, + [](const Node::Children& c) noexcept { return c.size(); }}, + ref[path.back()].content); +} + +IsArray NsScheme::IsArray(const FieldPath& path) const noexcept { + if (path.empty()) return ns_.array; + const Node::Children* ptr = &std::get(ns_.content); + for (size_t i = 0, s = path.size() - 1; i < s; ++i) { + assertrx(ptr->size() > path[i]); + const auto& idx = (*ptr)[path[i]]; + if (idx.array == IsArray::Yes) return IsArray::Yes; + std::visit( + reindexer::overloaded{[&ptr](const Node::Children& c) noexcept { ptr = &c; }, [](const Node::Child&) noexcept { assertrx(0); }}, + idx.content); + } + assertrx(ptr->size() > path.back()); + return (*ptr)[path.back()].array; +} + +FieldType NsScheme::GetFieldType(const FieldPath& path) const noexcept { + assertrx(!path.empty()); + const Node::Children& ref = findLastContainer(path); + assertrx(ref.size() > path.back()); + return std::visit(reindexer::overloaded{[](const Node::Child& c) noexcept { return c.type; }, + [](const Node::Children&) noexcept { return FieldType::Struct; }}, + ref[path.back()].content); +} + +void NsScheme::SetFieldType(const FieldPath& path, FieldType ft) noexcept { + assertrx(!path.empty()); + Node::Children& ref = findLastContainer(path); + assertrx(ref.size() > path.back()); + return std::visit(reindexer::overloaded{[ft](Node::Child& c) noexcept { c.type = ft; }, [](Node::Children&) noexcept { assertrx(0); }}, + ref[path.back()].content); +} + +std::string NsScheme::GetJsonPath(const FieldPath& path) const noexcept { + if (path.empty()) return {}; + std::string res; + const Node::Children* ptr = &std::get(ns_.content); + for 
(size_t i = 0, s = path.size() - 1; i < s; ++i) { + assertrx(ptr->size() > path[i]); + const auto& idx = (*ptr)[path[i]]; + res += idx.name; + std::visit( + reindexer::overloaded{[&ptr](const Node::Children& c) noexcept { ptr = &c; }, [](const Node::Child&) noexcept { assertrx(0); }}, + idx.content); + res += '.'; + } + assertrx(ptr->size() > path.back()); + res += (*ptr)[path.back()].name; + return res; +} + +void NsScheme::AddIndex(const FieldPath& path, size_t index, IsSparse isSparse) { + assertrx(!path.empty()); + if (isSparse == IsSparse::No) { + ns_.sparse = IsSparse::No; + } + Node::Children* ptr = &std::get(ns_.content); + for (size_t i = 0, s = path.size() - 1; i < s; ++i) { + assertrx(ptr->size() > path[i]); + if (isSparse == IsSparse::No) { + (*ptr)[path[i]].sparse = IsSparse::No; + } + std::visit(reindexer::overloaded{[&ptr](Node::Children& c) noexcept { ptr = &c; }, [](Node::Child&) noexcept { assertrx(0); }}, + (*ptr)[path[i]].content); + } + assertrx(ptr->size() > path.back()); + addIndex((*ptr)[path.back()], index, isSparse); +} + +FieldPath NsScheme::AddRndPkField(RandomGenerator& rnd) { + auto& children = std::get(ns_.content); + children.emplace_back(Node{rnd.FieldName(generatedNames_), Node::Child{rnd.RndPkIndexFieldType()}}); + children.back().array = IsArray::No; + children.back().sparse = IsSparse::No; + return {children.size() - 1}; +} + +void NsScheme::addIndex(Node& node, size_t index, IsSparse isSparse) { + if (isSparse == IsSparse::No) { + node.sparse = IsSparse::No; + } + std::visit(reindexer::overloaded{[index](Node::Child& c) noexcept { c.indexes.push_back(index); }, + [](Node::Children&) noexcept { assertrx(0); }}, + node.content); +} + +void NsScheme::fillChildren(Node::Children& children, RandomGenerator& rnd, unsigned level, bool& canBeArray, bool& canBeSparse) { + const size_t fieldsCount = rnd.FieldsCount(level == 0); + children.reserve(fieldsCount); + for (size_t i = 0; i < fieldsCount; ++i) { + auto fName = rnd.FieldName(generatedNames_); + const auto type = rnd.RndFieldType(level); + if (type == FieldType::Struct) { + children.emplace_back(Node{std::move(fName), Node::Children{}}); + fillChildren(std::get(children.back().content), rnd, level + 1, canBeArray, canBeSparse); + if (canBeArray || rnd.RndErr()) { + children.back().array = rnd.RndArrayField(); + } + if (!canBeSparse && !rnd.RndErr()) { + children.back().sparse = IsSparse::No; + } + } else { + children.emplace_back(Node{std::move(fName), Node::Child{type}}); + if (type == FieldType::Point) { + canBeSparse = false; + canBeArray = false; + children.back().sparse = IsSparse::No; + } + if (canBeArray || rnd.RndErr()) { + children.back().array = rnd.RndArrayField(); + } + } + } +} + +const NsScheme::Node::Children& NsScheme::findLastContainer(const FieldPath& path) const noexcept { + const Node::Children* ptr = &std::get(ns_.content); + for (size_t i = 0, s = path.size() - 1; i < s; ++i) { + assertrx(ptr->size() > path[i]); + std::visit( + reindexer::overloaded{[&ptr](const Node::Children& c) noexcept { ptr = &c; }, [](const Node::Child&) noexcept { assertrx(0); }}, + (*ptr)[path[i]].content); + } + return *ptr; +} + +NsScheme::Node::Children& NsScheme::findLastContainer(const FieldPath& path) noexcept { + Node::Children* ptr = &std::get(ns_.content); + for (size_t i = 0, s = path.size() - 1; i < s; ++i) { + assertrx(ptr->size() > path[i]); + std::visit(reindexer::overloaded{[&ptr](Node::Children& c) noexcept { ptr = &c; }, [](Node::Child&) noexcept { assertrx(0); }}, + (*ptr)[path[i]].content); 
+ } + return *ptr; +} + +void NsScheme::rndValueToJson(reindexer::JsonBuilder& builder, FieldType ft, std::string_view name, const std::vector& idxNumbers, + const std::vector& indexes, RandomGenerator& rnd) { switch (ft) { case FieldType::Bool: builder.Put(name, rnd.RndBool(0.5)); @@ -35,7 +222,11 @@ void NsScheme::rndValueToJson(reindexer::JsonBuilder& builder, FieldType ft, std builder.Put(name, rnd.RndIntValue()); break; case FieldType::Int64: - builder.Put(name, rnd.RndInt64Value()); + if (isTtl(idxNumbers, indexes)) { + builder.Put(name, rnd.RndTtlValue()); + } else { + builder.Put(name, rnd.RndInt64Value()); + } break; case FieldType::Double: builder.Put(name, rnd.RndDoubleValue()); @@ -54,41 +245,44 @@ void NsScheme::rndValueToJson(reindexer::JsonBuilder& builder, FieldType ft, std Node::Children children; fillChildren(children, rnd, 2, canBeArray, canBeSparse); auto obj = builder.Object(name); - toJson(obj, children, rnd); + toJson(obj, children, rnd, indexes); } break; default: assertrx(0); } } -void NsScheme::toJson(reindexer::JsonBuilder& builder, const Node::Children& children, RandomGenerator& rnd) { +void NsScheme::toJson(reindexer::JsonBuilder& builder, const Node::Children& children, RandomGenerator& rnd, + const std::vector& indexes) { for (const Node& n : children) { if (!rnd.NeedThisNode(n.sparse)) continue; - if (rnd.RndArrayField(n.array)) { + if (rnd.RndArrayField(n.array) == IsArray::Yes) { auto arr = builder.Array(n.name); const size_t arrSize = rnd.ArraySize(); for (size_t i = 0; i < arrSize; ++i) { if (rnd.RndErr()) { - rndValueToJson(arr, rnd.RndFieldType(), {}, rnd); + rndValueToJson(arr, rnd.RndFieldType(), {}, {}, indexes, rnd); } else { - std::visit(reindexer::overloaded{[&](const Node::Child& c) { rndValueToJson(arr, c.type, {}, rnd); }, - [&](const Node::Children& c) { - auto obj = arr.Object(); - toJson(obj, c, rnd); - }}, - n.content); + std::visit( + reindexer::overloaded{[&](const Node::Child& c) { rndValueToJson(arr, c.type, {}, c.indexes, indexes, rnd); }, + [&](const Node::Children& c) { + auto obj = arr.Object(); + toJson(obj, c, rnd, indexes); + }}, + n.content); } } } else { if (rnd.RndErr()) { - rndValueToJson(builder, rnd.RndFieldType(), n.name, rnd); + rndValueToJson(builder, rnd.RndFieldType(), n.name, {}, indexes, rnd); } else { - std::visit(reindexer::overloaded{[&](const Node::Child& c) { rndValueToJson(builder, c.type, n.name, rnd); }, - [&](const Node::Children& c) { - auto obj = builder.Object(n.name); - toJson(obj, c, rnd); - }}, - n.content); + std::visit( + reindexer::overloaded{[&](const Node::Child& c) { rndValueToJson(builder, c.type, n.name, c.indexes, indexes, rnd); }, + [&](const Node::Children& c) { + auto obj = builder.Object(n.name); + toJson(obj, c, rnd, indexes); + }}, + n.content); } } } @@ -100,9 +294,9 @@ void NsScheme::Node::Dump(std::ostream& os, size_t offset) const { for (size_t i = 0; i <= offset; ++i) os << " "; os << "name: " << name << '\n'; for (size_t i = 0; i <= offset; ++i) os << " "; - os << "sparse: " << (sparse ? "true" : "false") << '\n'; + os << "sparse: " << std::boolalpha << (sparse == IsSparse::Yes) << '\n'; for (size_t i = 0; i <= offset; ++i) os << " "; - os << "array: " << (array ? 
"true" : "false") << '\n'; + os << "array: " << std::boolalpha << (array == IsArray::Yes) << '\n'; std::visit(reindexer::overloaded{[&](const Child& child) { for (size_t i = 0; i <= offset; ++i) os << " "; os << "type: " << child.type << '\n'; diff --git a/cpp_src/gtests/tests/fixtures/fuzzing/ns_scheme.h b/cpp_src/gtests/tests/fixtures/fuzzing/ns_scheme.h index 675173c36..a63a76e38 100644 --- a/cpp_src/gtests/tests/fixtures/fuzzing/ns_scheme.h +++ b/cpp_src/gtests/tests/fixtures/fuzzing/ns_scheme.h @@ -1,10 +1,10 @@ #pragma once #include +#include #include #include -#include "estl/overloaded.h" -#include "random_generator.h" +#include "types.h" namespace reindexer { @@ -15,17 +15,22 @@ class JsonBuilder; namespace fuzzing { +class RandomGenerator; +class Index; + class NsScheme { struct Node { using Children = std::vector; struct Child { + Child(FieldType t) noexcept : type{t} {} FieldType type; + std::vector indexes; }; std::string name; std::variant content; - bool sparse{true}; - bool array{false}; + IsSparse sparse{IsSparse::Yes}; + IsArray array{IsArray::No}; void Dump(std::ostream&, size_t offset) const; }; @@ -34,164 +39,29 @@ class NsScheme { bool canBeArray = true, canBeSparse = true; fillChildren(std::get(ns_.content), rnd, 0, canBeArray, canBeSparse); } - size_t FieldsCount(const FieldPath& path) const noexcept { - if (path.empty()) { - return std::get(ns_.content).size(); - } - const Node::Children& ref = findLastContainer(path); - assertrx(ref.size() > path.back()); - return std::visit(reindexer::overloaded{[](const Node::Child&) noexcept -> size_t { - assertrx(false); - return 0; - }, - [](const Node::Children& c) noexcept { return c.size(); }}, - ref[path.back()].content); - } - bool IsStruct(const FieldPath& path) const noexcept { - if (path.empty()) return true; - const Node::Children& ref = findLastContainer(path); - assertrx(ref.size() > path.back()); - return std::holds_alternative(ref[path.back()].content); - } - bool IsPoint(const FieldPath& path) const noexcept { - if (path.empty()) return false; - const Node::Children& ref = findLastContainer(path); - assertrx(ref.size() > path.back()); - return !std::holds_alternative(ref[path.back()].content) && - std::get(ref[path.back()].content).type == FieldType::Point; - } - bool IsArray(const FieldPath& path) const noexcept { - if (path.empty()) return ns_.array; - const Node::Children* ptr = &std::get(ns_.content); - for (size_t i = 0, s = path.size() - 1; i < s; ++i) { - assertrx(ptr->size() > path[i]); - const auto& idx = (*ptr)[path[i]]; - if (idx.array) return true; - std::visit(reindexer::overloaded{[&ptr](const Node::Children& c) noexcept { ptr = &c; }, - [](const Node::Child&) noexcept { assert(0); }}, - idx.content); - } - assertrx(ptr->size() > path.back()); - return (*ptr)[path.back()].array; - } - FieldType GetFieldType(const FieldPath& path) const noexcept { - assertrx(!path.empty()); - const Node::Children& ref = findLastContainer(path); - assertrx(ref.size() > path.back()); - return std::visit(reindexer::overloaded{[](const Node::Child& c) noexcept { return c.type; }, - [](const Node::Children&) noexcept { return FieldType::Struct; }}, - ref[path.back()].content); - } - void SetFieldType(const FieldPath& path, FieldType ft) noexcept { - assertrx(!path.empty()); - Node::Children& ref = findLastContainer(path); - assertrx(ref.size() > path.back()); - return std::visit( - reindexer::overloaded{[ft](Node::Child& c) noexcept { c.type = ft; }, [](Node::Children&) noexcept { assert(0); }}, - 
ref[path.back()].content); - } - std::string GetJsonPath(const FieldPath& path) const noexcept { - if (path.empty()) return {}; - std::string res; - const Node::Children* ptr = &std::get(ns_.content); - for (size_t i = 0, s = path.size() - 1; i < s; ++i) { - assertrx(ptr->size() > path[i]); - const auto& idx = (*ptr)[path[i]]; - res += idx.name; - std::visit(reindexer::overloaded{[&ptr](const Node::Children& c) noexcept { ptr = &c; }, - [](const Node::Child&) noexcept { assert(0); }}, - idx.content); - res += '.'; - } - assertrx(ptr->size() > path.back()); - res += (*ptr)[path.back()].name; - return res; - } - void AddIndex(const FieldPath& path, bool isSparse) { - if (path.empty()) return; - if (!isSparse) ns_.sparse = false; - Node::Children* ptr = &std::get(ns_.content); - for (size_t i = 0, s = path.size() - 1; i < s; ++i) { - assertrx(ptr->size() > path[i]); - if (!isSparse) { - (*ptr)[path[i]].sparse = false; - } - std::visit(reindexer::overloaded{[&ptr](Node::Children& c) noexcept { ptr = &c; }, [](Node::Child&) noexcept { assert(0); }}, - (*ptr)[path[i]].content); - } - assertrx(ptr->size() > path.back()); - mark((*ptr)[path.back()], isSparse); - } - void NewItem(reindexer::WrSerializer&, RandomGenerator&); + size_t FieldsCount(const FieldPath&) const noexcept; + bool IsStruct(const FieldPath&) const noexcept; + bool IsPoint(const FieldPath&) const noexcept; + bool IsTtl(const FieldPath&, const std::vector&) const noexcept; + enum IsArray IsArray(const FieldPath&) const noexcept; + FieldType GetFieldType(const FieldPath&) const noexcept; + void SetFieldType(const FieldPath&, FieldType) noexcept; + std::string GetJsonPath(const FieldPath&) const noexcept; + void AddIndex(const FieldPath&, size_t index, IsSparse); + void NewItem(reindexer::WrSerializer&, RandomGenerator&, const std::vector&); void Dump(std::ostream& os, size_t offset) const { ns_.Dump(os, offset); } - FieldPath AddRndPkField(RandomGenerator& rnd) { - auto& children = std::get(ns_.content); - children.emplace_back(Node{rnd.FieldName(generatedNames_), Node::Child{rnd.RndPkIndexFieldType()}}); - children.back().array = false; - children.back().sparse = false; - return {children.size() - 1}; - } + FieldPath AddRndPkField(RandomGenerator&); private: - static void mark(Node& node, bool isSparse) { - if (!isSparse) { - node.sparse = false; - } - std::visit(reindexer::overloaded{[](Node::Child&) noexcept {}, - [isSparse](Node::Children& c) noexcept { - for (Node& n : c) mark(n, isSparse); - }}, - node.content); - } - void fillChildren(Node::Children& children, RandomGenerator& rnd, unsigned level, bool& canBeArray, bool& canBeSparse) { - const size_t fieldsCount = rnd.FieldsCount(level == 0); - children.reserve(fieldsCount); - for (size_t i = 0; i < fieldsCount; ++i) { - auto fName = rnd.FieldName(generatedNames_); - const auto type = rnd.RndFieldType(level); - if (type == FieldType::Struct) { - children.emplace_back(Node{std::move(fName), Node::Children{}}); - fillChildren(std::get(children.back().content), rnd, level + 1, canBeArray, canBeSparse); - if (canBeArray || rnd.RndErr()) { - children.back().array = rnd.RndArrayField(); - } - if (!canBeSparse && !rnd.RndErr()) { - children.back().sparse = false; - } - } else { - children.emplace_back(Node{std::move(fName), Node::Child{type}}); - if (type == FieldType::Point) { - canBeSparse = false; - canBeArray = false; - children.back().sparse = false; - } - if (canBeArray || rnd.RndErr()) { - children.back().array = rnd.RndArrayField(); - } - } - } - } - const Node::Children& 
findLastContainer(const FieldPath& path) const noexcept { - const Node::Children* ptr = &std::get(ns_.content); - for (size_t i = 0, s = path.size() - 1; i < s; ++i) { - assertrx(ptr->size() > path[i]); - std::visit(reindexer::overloaded{[&ptr](const Node::Children& c) noexcept { ptr = &c; }, - [](const Node::Child&) noexcept { assert(0); }}, - (*ptr)[path[i]].content); - } - return *ptr; - } - Node::Children& findLastContainer(const FieldPath& path) noexcept { - Node::Children* ptr = &std::get(ns_.content); - for (size_t i = 0, s = path.size() - 1; i < s; ++i) { - assertrx(ptr->size() > path[i]); - std::visit(reindexer::overloaded{[&ptr](Node::Children& c) noexcept { ptr = &c; }, [](Node::Child&) noexcept { assert(0); }}, - (*ptr)[path[i]].content); - } - return *ptr; - } - void toJson(reindexer::JsonBuilder&, const Node::Children&, RandomGenerator&); - void rndValueToJson(reindexer::JsonBuilder&, FieldType, std::string_view name, RandomGenerator&); + static void addIndex(Node&, size_t index, IsSparse); + void fillChildren(Node::Children&, RandomGenerator&, unsigned level, bool& canBeArray, bool& canBeSparse); + const Node::Children& findLastContainer(const FieldPath&) const noexcept; + Node::Children& findLastContainer(const FieldPath&) noexcept; + void toJson(reindexer::JsonBuilder&, const Node::Children&, RandomGenerator&, const std::vector&); + void rndValueToJson(reindexer::JsonBuilder&, FieldType, std::string_view name, const std::vector& idxNumbers, + const std::vector&, RandomGenerator&); + static bool isTtl(const std::vector& idxNumbers, const std::vector&) noexcept; + Node ns_; std::unordered_set generatedNames_; }; diff --git a/cpp_src/gtests/tests/fixtures/fuzzing/query_generator.cc b/cpp_src/gtests/tests/fixtures/fuzzing/query_generator.cc index 520c3abe7..da0f1d081 100644 --- a/cpp_src/gtests/tests/fixtures/fuzzing/query_generator.cc +++ b/cpp_src/gtests/tests/fixtures/fuzzing/query_generator.cc @@ -1,12 +1,13 @@ #include "query_generator.h" #include "core/query/query.h" +#include "index.h" +#include "ns.h" namespace fuzzing { reindexer::Query QueryGenerator::operator()() { if (namespaces_.empty() || rndGen_.RndErr()) { - std::unordered_set generatedNames; - return reindexer::Query{rndGen_.NsName(generatedNames)}; + return reindexer::Query{rndGen_.GenerateNsName()}; } const auto& ns = rndGen_.RndWhich(namespaces_); reindexer::Query query{ns.GetName()}; @@ -15,21 +16,24 @@ reindexer::Query QueryGenerator::operator()() { case Index: if (const auto& indexes = ns.GetIndexes(); !indexes.empty()) { const auto& idx = rndGen_.RndWhich(indexes); - std::visit(reindexer::overloaded{[&](const Index::Child& c) { rndGen_.RndWhere(query, idx.name, {c.type}); }, + std::visit(reindexer::overloaded{[&](const Index::Child& c) { rndGen_.RndWhere(query, idx.Name(), c.type, idx.Type()); }, [&](const Index::Children& c) { std::vector types; types.reserve(c.size()); for (const auto& child : c) types.push_back(child.type); - rndGen_.RndWhere(query, idx.name, types); + rndGen_.RndWhereComposite(query, idx.Name(), std::move(types), idx.Type()); }}, - idx.content); + idx.Content()); } break; case Field: { const auto path = rndGen_.RndField(ns.GetScheme()); const FieldType type = ns.GetScheme().GetFieldType(path); - if (type != FieldType::Struct) { - rndGen_.RndWhere(query, ns.GetScheme().GetJsonPath(path), {type}); + if (type == FieldType::Struct) { // TODO object find + } else { + const std::optional indexType = + ns.GetScheme().IsTtl(path, ns.GetIndexes()) ? 
IndexType::Ttl : std::optional{}; + rndGen_.RndWhere(query, ns.GetScheme().GetJsonPath(path), type, indexType); } } break; case Empty: diff --git a/cpp_src/gtests/tests/fixtures/fuzzing/query_generator.h b/cpp_src/gtests/tests/fixtures/fuzzing/query_generator.h index 2724b23c5..c2644492b 100644 --- a/cpp_src/gtests/tests/fixtures/fuzzing/query_generator.h +++ b/cpp_src/gtests/tests/fixtures/fuzzing/query_generator.h @@ -1,6 +1,6 @@ #pragma once -#include "ns.h" +#include "random_generator.h" namespace reindexer { @@ -10,10 +10,11 @@ class Query; namespace fuzzing { +class Ns; + class QueryGenerator { public: - QueryGenerator(const std::vector& nss, std::ostream& os, RandomGenerator::ErrFactorType errorFactor) - : namespaces_{nss}, rndGen_{os, errorFactor} {} + QueryGenerator(const std::vector& nss, RandomGenerator::ErrFactorType errorFactor) : namespaces_{nss}, rndGen_{errorFactor} {} reindexer::Query operator()(); private: diff --git a/cpp_src/gtests/tests/fixtures/fuzzing/random_generator.cc b/cpp_src/gtests/tests/fixtures/fuzzing/random_generator.cc index 25e0f640e..e207a6667 100644 --- a/cpp_src/gtests/tests/fixtures/fuzzing/random_generator.cc +++ b/cpp_src/gtests/tests/fixtures/fuzzing/random_generator.cc @@ -1,17 +1,97 @@ #include "random_generator.h" +#include +#include #include +#include +#include "core/payload/fieldsset.h" #include "core/query/query.h" +#include "index.h" #include "ns_scheme.h" namespace fuzzing { -RandomGenerator::RandomGenerator(std::ostream& os, ErrFactorType errorFactor) - : gen_(std::chrono::system_clock::now().time_since_epoch().count()), errFactor_{errorFactor} { +std::string& RandomGenerator::out() noexcept { + static std::string outStr; + return outStr; +} + +std::unique_ptr& RandomGenerator::in() noexcept { + static std::unique_ptr f; + return f; +} + +void RandomGenerator::SetOut(std::string o) { + ASSERT_TRUE(out().empty()); + ASSERT_FALSE(in()); + out() = std::move(o); + { + std::ifstream f{out()}; + ASSERT_FALSE(f.is_open()) << "File '" << out() << "' already exists"; + } +} + +void RandomGenerator::SetIn(const std::string& i) { + ASSERT_FALSE(in()); + ASSERT_TRUE(out().empty()); + in() = std::make_unique(i); + ASSERT_TRUE(in()->is_open()) << "Cannot open file '" << i << '\''; + in()->exceptions(std::ios_base::badbit | std::ios_base::failbit | std::ios_base::eofbit); +} + +RandomGenerator::RandomEngine RandomGenerator::createRandomEngine() { + if (in()) { + RandomEngine ret; + std::string buf; + std::getline(*in(), buf); + std::istringstream ss{buf}; + ss >> ret; + return ret; + } else { + RandomEngine ret(std::chrono::system_clock::now().time_since_epoch().count()); + if (!out().empty()) { + std::ofstream file{out(), std::ios_base::app}; + if (file.is_open()) { + file.exceptions(std::ios_base::badbit | std::ios_base::failbit | std::ios_base::eofbit); + file << ret << std::endl; + } else { + EXPECT_TRUE(false) << "Cannot open file '" << out() << '\''; + } + } + return ret; + } +} + +RandomGenerator::RandomGenerator(ErrFactorType errorFactor) : gen_{createRandomEngine()}, errFactor_{errorFactor} { assertrx(errFactor_.first < errFactor_.second); errParams_ = {static_cast(errFactor_.second - errFactor_.first), static_cast(errFactor_.first)}; - os << gen_ << std::endl; } -RandomGenerator::RandomGenerator(std::istream& is) { is >> gen_; } + +size_t RandomGenerator::FieldsCount(bool firstLevel) { + if (RndErr()) { + enum Err : uint8_t { Zero, TooMany, END = TooMany }; + switch (RndWhich()) { + case Zero: + return 0; + case TooMany: + return RndInt(0, 
10'000); + default: + assertrx(0); + } + } + if (firstLevel) { + enum Size : uint8_t { Normal, Long, END = Long }; + switch (RndWhich()) { + case Normal: + return RndInt(1, 9); + case Long: + return RndInt(10, 100); + default: + assertrx(false); + std::abort(); + } + } + return RndInt(1, 5); +} std::string RandomGenerator::FieldName(std::unordered_set& generatedNames) { // TODO static constexpr char alfas[] = "_abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"; @@ -148,7 +228,7 @@ FieldPath RandomGenerator::RndScalarField(const NsScheme& nsScheme) { const int end = idx + size; while (idx < end) { res.back() = idx % size; - if (!nsScheme.IsArray(res) && !nsScheme.IsPoint(res)) break; + if (nsScheme.IsArray(res) == IsArray::No && !nsScheme.IsPoint(res)) break; ++idx; } if (idx == end) return {}; @@ -156,7 +236,7 @@ FieldPath RandomGenerator::RndScalarField(const NsScheme& nsScheme) { return res; } -std::string RandomGenerator::IndexFieldType(fuzzing::FieldType ft) { +std::string RandomGenerator::IndexFieldType(FieldType ft) { static const std::string types[] = {"bool", "int", "int64", "double", "string", "uuid", "point", "composite"}; if (RndErr()) { // TODO rnd string @@ -167,52 +247,158 @@ std::string RandomGenerator::IndexFieldType(fuzzing::FieldType ft) { return types[i]; } -std::string RandomGenerator::RndIndexType(fuzzing::FieldType ft, bool pk) { - static const std::string types[] = {"-", "hash", "tree", "ttl", "text", "fuzzytext", "rtree"}; - static const std::vector availableTypes[] = { - {0}, // Bool - {0, 1, 2}, // Int - {0, 1, 2, 3}, // Int64 - {0, 2}, // Double - {0, 1, 2 /*, 4, 5*/}, // String // TODO FT indexes - {1}, // Uuid - {6}, // Point - {1, 2 /*, 4, 5*/} // Struct // TODO FT indexes +IndexType RandomGenerator::RndIndexType(IndexType it) { + if (RndErr()) { + return RndWhich(); // TODO + } + return it; +} + +template * Availables> +IndexType RandomGenerator::rndIndexType(const std::vector& fieldTypes) { + if (RndErr()) { + // TODO rnd string + return RndWhich(); // TODO + } + assertrx(!fieldTypes.empty()); + std::vector availables; + { + const size_t f = static_cast(fieldTypes[0]); + assertrx(f < N); + availables = Availables[f]; + } + for (size_t i = 1, s = fieldTypes.size(); i < s; ++i) { + const size_t f = static_cast(fieldTypes[i]); + std::vector tmp; + tmp.reserve(availables.size()); + assertrx(f < N); + std::set_intersection(availables.begin(), availables.end(), Availables[f].begin(), Availables[f].end(), std::back_inserter(tmp)); + availables = tmp; + } + if (availables.empty()) { + return RndWhich(); // TODO + } else { + return RndWhich(availables); + } +} + +IndexType RandomGenerator::RndIndexType(const std::vector& fieldTypes) { + static const std::vector availableTypes[] = { + {IndexType::Store}, // Bool + {IndexType::Store, IndexType::Hash, IndexType::Tree}, // Int + {IndexType::Store, IndexType::Hash, IndexType::Tree, IndexType::Ttl}, // Int64 + {IndexType::Store, IndexType::Tree}, // Double + {IndexType::Store, IndexType::Hash, IndexType::Tree}, // String // TODO IndexType::FastFT IndexType::FuzzyFT + {IndexType::Hash}, // Uuid + {IndexType::RTree}, // Point + {IndexType::Hash, IndexType::Tree} // Struct // TODO IndexType::FastFT IndexType::FuzzyFT }; - static const std::vector availablePkTypes[] = { - {}, // Bool - {1, 2}, // Int - {1, 2, 3}, // Int64 - {2}, // Double - {1, 2}, // String - {1}, // Uuid - {}, // Point - {1, 2} // Struct + return rndIndexType, availableTypes>(fieldTypes); +} + +IndexType RandomGenerator::RndPkIndexType(const std::vector& 
fieldTypes) { + static const std::vector availablePkTypes[] = { + {}, // Bool + {IndexType::Hash, IndexType::Tree}, // Int + {IndexType::Hash, IndexType::Tree, IndexType::Ttl}, // Int64 + {IndexType::Tree}, // Double + {IndexType::Hash, IndexType::Tree}, // String + {IndexType::Hash}, // Uuid + {}, // Point + {IndexType::Hash, IndexType::Tree} // Struct }; + return rndIndexType, availablePkTypes>(fieldTypes); +} + +size_t RandomGenerator::ArraySize() { + if (RndErr()) return RndInt(0, 100'000); + enum Size : uint8_t { Short, Normal, Long, VeryLong, END = VeryLong }; + switch (RndWhich()) { + case Short: + return RndInt(0, 5); + case Normal: + return RndInt(6, 20); + case Long: + return RndInt(21, 200); + case VeryLong: + return RndInt(201, 10'000); + default: + assertrx(false); + std::abort(); + } +} + +size_t RandomGenerator::IndexesCount() { if (RndErr()) { - // TODO rnd string - return RndWhich(types); + enum Err : uint8_t { Zero, TooMany, END = TooMany }; + switch (RndWhich()) { + case Zero: + return 0; + case TooMany: + return RndInt(reindexer::kMaxIndexes, 5 + reindexer::kMaxIndexes); + default: + assertrx(0); + } } - const size_t i = static_cast(ft); - size_t n; - if (pk) { - assertrx(i < std::size(availablePkTypes)); - if (availablePkTypes[i].empty()) { - return RndWhich(types); + enum Count : uint8_t { Few, Normal, Many, TooMany, END = TooMany }; + switch (RndWhich()) { + case Few: + return RndInt(1, 3); + case Normal: + return RndInt(4, 20); + case Many: + return RndInt(21, 63); + case TooMany: + return RndInt(64, reindexer::kMaxIndexes); + default: + assertrx(false); + std::abort(); + } +} + +size_t RandomGenerator::compositeIndexSize(size_t scalarIndexesCount) { + if (RndErr()) { + enum Err : uint8_t { Zero, /*One,*/ TooMany, END = TooMany }; + switch (RndWhich()) { + case Zero: + return 0; + /*case One: + return 1;*/ + case TooMany: + return RndInt(0, 10'000); + default: + assertrx(0); + } + } + assertrx(scalarIndexesCount >= 1); + return RndInt(1, scalarIndexesCount); +} + +std::vector RandomGenerator::RndFieldsForCompositeIndex(const std::vector& scalarIndexes) { + std::vector result; + const size_t count = compositeIndexSize(scalarIndexes.size()); + result.reserve(count); + const bool uniqueFields = count <= scalarIndexes.size() && !RndErr(); + // TODO unexisted and not indexed fields + if (uniqueFields) { + auto scalars = scalarIndexes; + while (result.size() < count) { + const size_t idx = rndSize(0, scalars.size() - 1); + result.push_back(scalars[idx]); + scalars.erase(scalars.begin() + idx); } - n = RndWhich(availablePkTypes[i]); } else { - assertrx(i < std::size(availableTypes)); - n = RndWhich(availableTypes[i]); + while (result.size() < count) { + result.push_back(scalarIndexes[rndSize(0, scalarIndexes.size() - 1)]); + } } - assertrx(n < std::size(types)); - return types[n]; + return result; } template <> constexpr size_t RandomGenerator::itemsCount = CondType::CondDWithin + 1; -CondType RandomGenerator::rndCond(fuzzing::FieldType ft) { // TODO array +CondType RandomGenerator::rndCond(FieldType ft) { // TODO array if (RndErr()) { return RndWhich(); } @@ -240,8 +426,6 @@ std::string RandomGenerator::rndStrUuidValue(bool noErrors) { if (!noErrors && RndErr()) { err = RndWhich(); } - std::string res; - if (err == Empty) return res; size_t size = 32; switch (err) { case Short: @@ -253,15 +437,17 @@ std::string RandomGenerator::rndStrUuidValue(bool noErrors) { case TooLong: size = RndInt(51, 100'000); break; - case NoErrors: case Empty: + return {}; + case NoErrors: case 
WrongVariant: case WrongChar: break; default: - assert(0); + assertrx(0); abort(); } + std::string res; res.reserve(size + 4); if (RndBool(0.001)) { res = std::string(std::string::size_type{size}, '0'); @@ -293,94 +479,127 @@ std::string RandomGenerator::rndStrUuidValue(bool noErrors) { reindexer::Uuid RandomGenerator::rndUuidValue() { return reindexer::Uuid{rndStrUuidValue(true)}; } -void RandomGenerator::RndWhere(reindexer::Query& query, const std::string& field, - const std::vector& types) { // TODO array +int64_t RandomGenerator::RndTtlValue() { + const int64_t now = std::chrono::duration_cast(std::chrono::system_clock::now().time_since_epoch()).count(); + // TODO uncomment this after TTL subscribe done + /*enum Size : uint8_t { Negative, FarPast, Past, Now, Future, FarFuture, AnyShort, Any, END = Any }; + switch (RndWhich()) { case Negative: return rndInt64(std::numeric_limits::min(), 0); case FarPast: + return rndInt64(0, now - 10'000); + case Past: + return rndInt64(now - 10'000, now - 10); + case Now: + return rndInt64(now - 10, now + 10); + case Future: + return rndInt64(now + 10, now + 10'000); + case FarFuture: + return rndInt64(now + 10'000, std::numeric_limits::max()); + case AnyShort: + return rndInt64(-50, 50); + case Any: + return rndInt64(std::numeric_limits::min(), std::numeric_limits::max()); + default: + assertrx(false); + std::abort(); + }*/ + return rndInt64(now + 10'000, std::numeric_limits::max()); +} + +void RandomGenerator::RndWhere(reindexer::Query& query, const std::string& field, FieldType fieldType, + std::optional indexType) { // TODO array + if (RndErr()) { + return RndWhereComposite(query, field, RndFieldTypesArray({fieldType}), indexType); + } std::unordered_set generatedNames; - assertrx(!types.empty()); const std::string fldName = FieldName(field, generatedNames); - const auto type = types.size() > 1 ? fuzzing::FieldType::Struct : types[0]; - const auto cond = rndCond(type); - switch (RndFieldType(type)) { - case fuzzing::FieldType::Bool: + const auto cond = rndCond(fieldType); + switch (RndFieldType(fieldType)) { + case FieldType::Bool: query.Where(fldName, cond, RndBool(0.5)); break; - case fuzzing::FieldType::Int: + case FieldType::Int: query.Where(fldName, cond, RndIntValue()); break; - case fuzzing::FieldType::Int64: - query.Where(fldName, cond, RndInt64Value()); + case FieldType::Int64: + if (indexType == IndexType::Ttl) { + query.Where(fldName, cond, RndTtlValue()); + } else { + query.Where(fldName, cond, RndInt64Value()); + } break; - case fuzzing::FieldType::Double: + case FieldType::Double: query.Where(fldName, cond, RndDoubleValue()); break; - case fuzzing::FieldType::String: + case FieldType::String: query.Where(fldName, cond, RndStringValue()); break; - case fuzzing::FieldType::Uuid: + case FieldType::Uuid: if (RndBool(0.5)) { query.Where(fldName, cond, rndUuidValue()); } else { query.Where(fldName, cond, rndStrUuidValue(false)); } break; - case fuzzing::FieldType::Point: + case FieldType::Point: query.Where(fldName, cond, {reindexer::Variant{reindexer::Point{RndDoubleValue(), RndDoubleValue()}}, reindexer::Variant{RndErr() ? 
RndDoubleValue() : std::abs(RndDoubleValue())}}); break; - case fuzzing::FieldType::Struct: // TODO - if (type == fuzzing::FieldType::Struct) { - } else { - } + case FieldType::Struct: // TODO break; default: assertrx(0); } } -std::ostream& operator<<(std::ostream& os, FieldType ft) { - switch (ft) { - case FieldType::Bool: - return os << "bool"; - case FieldType::Int: - return os << "int"; - case FieldType::Int64: - return os << "int64"; - case FieldType::Double: - return os << "double"; - case FieldType::String: - return os << "string"; - case FieldType::Uuid: - return os << "uuid"; - case FieldType::Point: - return os << "point"; - case FieldType::Struct: - return os << "struct"; - default: - assertrx(0); +void RandomGenerator::RndWhereComposite(reindexer::Query& query, const std::string& field, std::vector&& fieldTypes, + std::optional indexType) { // TODO array + if (RndErr()) { + return RndWhere(query, field, RndFieldType(), indexType); } - return os; -} - -reindexer::KeyValueType ToKeyValueType(FieldType ft) { - switch (ft) { - case FieldType::Bool: - return reindexer::KeyValueType::Bool{}; - case FieldType::Int: - return reindexer::KeyValueType::Int{}; - case FieldType::Int64: - return reindexer::KeyValueType::Int64{}; - case FieldType::Double: - return reindexer::KeyValueType::Double{}; - case FieldType::String: - return reindexer::KeyValueType::String{}; - case FieldType::Uuid: - return reindexer::KeyValueType::Uuid{}; - case FieldType::Point: - case FieldType::Struct: - default: - assertrx(0); + std::unordered_set generatedNames; + const std::string fldName = FieldName(field, generatedNames); + fieldTypes = RndFieldTypesArray(std::move(fieldTypes)); + const auto cond = rndCond(FieldType::Struct); + reindexer::VariantArray keys; + keys.reserve(fieldTypes.size()); + for (const FieldType ft : fieldTypes) { + switch (ft) { + case FieldType::Bool: + keys.emplace_back(RndBool(0.5)); + break; + case FieldType::Int: + keys.emplace_back(RndIntValue()); + break; + case FieldType::Int64: + if (indexType == IndexType::Ttl) { + keys.emplace_back(RndTtlValue()); + } else { + keys.emplace_back(RndInt64Value()); + } + break; + case FieldType::Double: + keys.emplace_back(RndDoubleValue()); + break; + case FieldType::String: + keys.emplace_back(RndStringValue()); + break; + case FieldType::Uuid: + if (RndBool(0.5)) { + keys.emplace_back(rndUuidValue()); + } else { + keys.emplace_back(rndStrUuidValue(false)); + } + break; + case FieldType::Point: + keys.emplace_back(reindexer::Point{RndDoubleValue(), RndDoubleValue()}); + break; + case FieldType::Struct: // TODO + break; + default: + assertrx(0); + } } + query.WhereComposite(fldName, cond, {std::move(keys)}); } } // namespace fuzzing diff --git a/cpp_src/gtests/tests/fixtures/fuzzing/random_generator.h b/cpp_src/gtests/tests/fixtures/fuzzing/random_generator.h index 7a628fe85..4960f28c3 100644 --- a/cpp_src/gtests/tests/fixtures/fuzzing/random_generator.h +++ b/cpp_src/gtests/tests/fixtures/fuzzing/random_generator.h @@ -1,10 +1,12 @@ #pragma once -#include +#include +#include #include #include #include "core/type_consts.h" #include "tools/assertrx.h" +#include "types.h" namespace reindexer { @@ -16,137 +18,69 @@ class KeyValueType; namespace fuzzing { -struct Index; class NsScheme; -enum class FieldType { Bool, Int, Int64, Double, String, Uuid, Point, Struct, END = Struct }; -reindexer::KeyValueType ToKeyValueType(FieldType); -std::ostream& operator<<(std::ostream&, FieldType); -using FieldPath = std::vector; - class RandomGenerator { using 
ErrFactorInt = uint32_t; public: using ErrFactorType = std::pair; - RandomGenerator(std::ostream&, ErrFactorType errorFactor); - RandomGenerator(std::istream&); + RandomGenerator(ErrFactorType errorFactor); - size_t FieldsCount(bool firstLevel) { - if (RndErr()) { - enum Err : uint8_t { Zero, TooMany, END = TooMany }; - switch (RndWhich()) { - case Zero: - return 0; - case TooMany: - return RndInt(0, 10'000); - default: - assertrx(0); - } - } - if (firstLevel) { - enum Size : uint8_t { Normal, Long, END = Long }; - switch (RndWhich()) { - case Normal: - return RndInt(1, 9); - case Long: - return RndInt(10, 100); - default: - assertrx(false); - std::abort(); - } - } - return RndInt(1, 5); - } - fuzzing::FieldType RndFieldType(unsigned level) { + size_t FieldsCount(bool firstLevel); + FieldType RndFieldType(unsigned level) { const bool withoutStruct = level > 2 && (level > 5 || !RndBool(1.0 / (2 << (2 * level)))); - return static_cast( - RndInt(static_cast(fuzzing::FieldType::Bool), static_cast(fuzzing::FieldType::Struct) - withoutStruct)); + return static_cast(RndInt(static_cast(FieldType::Bool), static_cast(FieldType::Struct) - withoutStruct)); } - fuzzing::FieldType RndFieldType() { - return static_cast( - RndInt(static_cast(fuzzing::FieldType::Bool), static_cast(fuzzing::FieldType::Point))); + FieldType RndFieldType() { + return static_cast(RndInt(static_cast(FieldType::Bool), static_cast(FieldType::Point))); } - fuzzing::FieldType RndPkIndexFieldType() { - return static_cast( - RndInt(static_cast(fuzzing::FieldType::Int), static_cast(fuzzing::FieldType::Uuid))); + FieldType RndPkIndexFieldType() { + return static_cast(RndInt(static_cast(FieldType::Int), static_cast(FieldType::Uuid))); } - std::string IndexFieldType(fuzzing::FieldType); - fuzzing::FieldType RndFieldType(fuzzing::FieldType type) { + std::string IndexFieldType(FieldType); + FieldType RndFieldType(FieldType type) { if (RndErr()) { - return RndWhich(); + return RndWhich(); } return type; } - std::string RndIndexType(fuzzing::FieldType, bool pk); - bool RndArrayField() { return RndBool(0.2); } - bool RndArrayField(bool array) { return RndErr() ? !array : array; } - size_t ArraySize() { - if (RndErr()) return RndInt(0, 100'000); - enum Size : uint8_t { Short, Normal, Long, VeryLong, END = VeryLong }; - switch (RndWhich()) { - case Short: - return RndInt(0, 5); - case Normal: - return RndInt(6, 20); - case Long: - return RndInt(21, 200); - case VeryLong: - return RndInt(201, 10'000); - default: - assertrx(false); - std::abort(); + std::vector RndFieldTypesArray(std::vector&& types) { + if (!RndErr()) { + return std::move(types); } - } - bool PkIndex(bool pk) { return RndErr() ? RndBool(0.5) : pk; } - bool SparseIndex(bool pk) { return pk ? 
RndErr() : RndBool(0.2); } - bool DenseIndex() { return RndBool(0.2); } - int64_t ExpiredIndex() { return RndInt(0, 100'000); } // TODO - size_t IndexesCount() { - if (RndErr()) { - enum Err : uint8_t { Zero, TooMany, END = TooMany }; - switch (RndWhich()) { - case Zero: - return 0; - case TooMany: - return RndInt(0, 1'000); - default: - assertrx(0); - } + if (RndBool(0.5)) { + types.resize(compositeIndexSize(types.size())); } - enum Count : uint8_t { Few, Normal, Many, TooMany, END = TooMany }; - switch (RndWhich()) { - case Few: - return RndInt(1, 3); - case Normal: - return RndInt(4, 6); - case Many: - return RndInt(7, 20); - case TooMany: - return RndInt(21, 63); - default: - assertrx(false); - std::abort(); + for (auto& t : types) { + t = RndFieldType(); } + return std::move(types); } - bool CompositeIndex() { return RndBool(0.2); } - bool UniqueName() { return RndBool(0.5); } - size_t CompositeIndexSize() { + IndexType RndIndexType(const std::vector&); + IndexType RndPkIndexType(const std::vector&); + IndexType RndIndexType(IndexType); + IsArray RndArrayField() { return RndBool(0.2) ? IsArray::Yes : IsArray::No; } + IsArray RndArrayField(IsArray array) { if (RndErr()) { - enum Err : uint8_t { Zero, One, TooMany, END = TooMany }; - switch (RndWhich()) { - case Zero: - return 0; - case One: - return 1; - case TooMany: - return RndInt(0, 10'000); - default: - assertrx(0); - } + return array == IsArray::Yes ? IsArray::No : IsArray::Yes; } - return RndInt(2, 5); + return array; + } + size_t ArraySize(); + bool PkIndex(bool pk) { return RndErr() ? RndBool(0.5) : pk; } + IsSparse RndSparseIndex(FieldType fldType) { + const bool couldBeSparse = fldType != FieldType::Struct && fldType != FieldType::Uuid; // TODO remove uuid #1470 + return (couldBeSparse ? RndBool(0.2) : RndErr()) ? IsSparse::Yes : IsSparse::No; } + bool RndSparseIndex(IsSparse isSparse) { return (isSparse == IsSparse::Yes) != RndErr(); } + bool DenseIndex() { return RndBool(0.2); } + int64_t ExpiredIndex() { return RndInt(0, 100'000); } // TODO + size_t IndexesCount(); + bool CompositeIndex(size_t scalarIndexesCount) { return scalarIndexesCount < 1 ? RndErr() : RndBool(0.2); } + bool UniqueName() { return RndBool(0.5); } + size_t compositeIndexSize(size_t scalarIndexesCount); + std::vector RndFieldsForCompositeIndex(const std::vector& scalarIndexes); std::string FieldName(std::unordered_set& generatedNames); std::string FieldName(const std::string& fieldName, std::unordered_set& generatedNames) { if (RndErr()) return FieldName(generatedNames); @@ -155,9 +89,12 @@ class RandomGenerator { FieldPath RndField(const NsScheme&); FieldPath RndScalarField(const NsScheme&); std::string IndexName(std::unordered_set& generatedNames) { return FieldName(generatedNames); } // TODO - std::string NsName(std::unordered_set& generatedNames) { return FieldName(generatedNames); } // TODO - std::string NsName(const std::string& nsName, std::unordered_set& generatedNames) { - if (RndErr()) return NsName(generatedNames); + std::string GenerateNsName() { // TODO + std::unordered_set generatedNames; + return FieldName(generatedNames); + } + std::string NsName(const std::string& nsName) { + if (RndErr()) return GenerateNsName(); return nsName; } int RndInt(int min, int max) { return rndInt_(gen_, IntRndParams(min, max)); } @@ -200,7 +137,7 @@ class RandomGenerator { return err; } char RndChar() { return rndChar_(gen_); } - bool NeedThisNode(bool sparse) { return sparse ? 
RndBool(0.5) : !RndErr(); } + bool NeedThisNode(IsSparse sparse) { return sparse == IsSparse::Yes ? RndBool(0.5) : !RndErr(); } int RndIntValue() { enum Size : uint8_t { Short, Long, END = Long }; switch (RndWhich()) { @@ -225,6 +162,7 @@ class RandomGenerator { std::abort(); } } + int64_t RndTtlValue(); bool RndBool(double p) { return rndBool_(gen_, BoolRndParams{p}); } double RndDoubleValue() { enum Size : uint8_t { Short, Long, END = Long }; @@ -273,14 +211,19 @@ class RandomGenerator { } template const auto& RndWhich(const Cont& cont) { - assert(!std::empty(cont)); + assertrx(!std::empty(cont)); auto it = std::begin(cont); std::advance(it, rndSize(0, std::size(cont) - 1)); return *it; } - void RndWhere(reindexer::Query&, const std::string& field, const std::vector&); + void RndWhere(reindexer::Query&, const std::string& field, FieldType, std::optional); + void RndWhereComposite(reindexer::Query&, const std::string& field, std::vector&&, std::optional); + + static void SetOut(std::string); + static void SetIn(const std::string&); private: + using RandomEngine = std::default_random_engine; using IntRndParams = std::uniform_int_distribution<>::param_type; using SizeRndParams = std::uniform_int_distribution::param_type; using Int64RndParams = std::uniform_int_distribution::param_type; @@ -291,11 +234,16 @@ class RandomGenerator { int rndInt(IntRndParams params) { return rndInt_(gen_, params); } int64_t rndInt64(int64_t min, int64_t max) { return rndInt64_(gen_, Int64RndParams(min, max)); } size_t rndSize(size_t min, size_t max) { return rndSize_(gen_, SizeRndParams(min, max)); } - CondType rndCond(fuzzing::FieldType); + CondType rndCond(FieldType); std::string rndStrUuidValue(bool noErrors); reindexer::Uuid rndUuidValue(); + template * Availables> + IndexType rndIndexType(const std::vector&); + static std::string& out() noexcept; + static std::unique_ptr& in() noexcept; + static RandomEngine createRandomEngine(); - std::default_random_engine gen_; + RandomEngine gen_; ErrFactorType errFactor_; ErrorParams errParams_; std::uniform_int_distribution<> rndInt_; diff --git a/cpp_src/gtests/tests/fixtures/fuzzing/types.cc b/cpp_src/gtests/tests/fixtures/fuzzing/types.cc new file mode 100644 index 000000000..11b50b658 --- /dev/null +++ b/cpp_src/gtests/tests/fixtures/fuzzing/types.cc @@ -0,0 +1,90 @@ +#include "types.h" + +#include +#include +#include "core/key_value_type.h" + +namespace fuzzing { + +std::ostream& operator<<(std::ostream& os, FieldType ft) { + switch (ft) { + case FieldType::Bool: + return os << "bool"; + case FieldType::Int: + return os << "int"; + case FieldType::Int64: + return os << "int64"; + case FieldType::Double: + return os << "double"; + case FieldType::String: + return os << "string"; + case FieldType::Uuid: + return os << "uuid"; + case FieldType::Point: + return os << "point"; + case FieldType::Struct: + return os << "struct"; + default: + assertrx(0); + } + return os; +} + +reindexer::KeyValueType ToKeyValueType(FieldType ft) { + switch (ft) { + case FieldType::Bool: + return reindexer::KeyValueType::Bool{}; + case FieldType::Int: + return reindexer::KeyValueType::Int{}; + case FieldType::Int64: + return reindexer::KeyValueType::Int64{}; + case FieldType::Double: + return reindexer::KeyValueType::Double{}; + case FieldType::String: + return reindexer::KeyValueType::String{}; + case FieldType::Uuid: + return reindexer::KeyValueType::Uuid{}; + case FieldType::Point: + return reindexer::KeyValueType::Undefined{}; // TODO change to KeyValueType::Point #1352 + case 
FieldType::Struct: + default: + assertrx(0); + } +} + +std::ostream& operator<<(std::ostream& os, const FieldPath& fp) { + os << '['; + for (size_t i = 0, s = fp.size(); i < s; ++i) { + if (i != 0) { + os << ' '; + } + os << fp[i]; + } + return os << ']' << std::endl; +} + +std::string_view ToText(IndexType it) { + using namespace std::string_view_literals; + switch (it) { + case IndexType::Store: + return "-"sv; + case IndexType::Hash: + return "hash"sv; + case IndexType::Tree: + return "tree"sv; + case IndexType::Ttl: + return "ttl"sv; + case IndexType::FastFT: + return "text"sv; + case IndexType::FuzzyFT: + return "fuzzytext"sv; + case IndexType::RTree: + return "rtree"sv; + default: + assertrx(0); + } +} + +std::ostream& operator<<(std::ostream& os, IndexType it) { return os << ToText(it); } + +} // namespace fuzzing diff --git a/cpp_src/gtests/tests/fixtures/fuzzing/types.h b/cpp_src/gtests/tests/fixtures/fuzzing/types.h new file mode 100644 index 000000000..db97087c6 --- /dev/null +++ b/cpp_src/gtests/tests/fixtures/fuzzing/types.h @@ -0,0 +1,29 @@ +#pragma once + +#include +#include +#include + +namespace reindexer { + +class KeyValueType; + +} // namespace reindexer + +namespace fuzzing { + +enum class FieldType { Bool, Int, Int64, Double, String, Uuid, Point, Struct, END = Struct }; +reindexer::KeyValueType ToKeyValueType(FieldType); +std::ostream& operator<<(std::ostream&, FieldType); + +using FieldPath = std::vector; +std::ostream& operator<<(std::ostream&, const FieldPath&); + +enum class IndexType { Store, Hash, Tree, Ttl, FastFT, FuzzyFT, RTree, END = RTree }; +std::string_view ToText(IndexType); +std::ostream& operator<<(std::ostream&, IndexType); + +enum class IsArray : bool { Yes = true, No = false }; +enum class IsSparse : bool { Yes = true, No = false }; + +} // namespace fuzzing diff --git a/cpp_src/gtests/tests/fixtures/get_pk_api.h b/cpp_src/gtests/tests/fixtures/get_pk_api.h index ebe17f9ea..29e7ad876 100644 --- a/cpp_src/gtests/tests/fixtures/get_pk_api.h +++ b/cpp_src/gtests/tests/fixtures/get_pk_api.h @@ -90,7 +90,7 @@ class ExtractPK : public testing::Test { Error err = db_->Select(query, qres); if (!err.ok()) return ResultType(err, QueryResults{}); - if (print) printQueryResults(query._namespace, qres); + if (print) printQueryResults(query.NsName(), qres); return ResultType(err, std::move(qres)); } diff --git a/cpp_src/gtests/tests/fixtures/grpcclient_api.h b/cpp_src/gtests/tests/fixtures/grpcclient_api.h index 39230d1a8..afb7faa45 100644 --- a/cpp_src/gtests/tests/fixtures/grpcclient_api.h +++ b/cpp_src/gtests/tests/fixtures/grpcclient_api.h @@ -267,7 +267,7 @@ class GrpcClientApi : public ReindexerApi { reindexer::Serializer rdser(cjson); reindexer::CJsonDecoder decoder(const_cast(nsTypes.first)); - ASSERT_NO_THROW(decoder.Decode(pl, rdser, wrser)); + ASSERT_NO_THROW(decoder.Decode<>(pl, rdser, wrser)); ASSERT_TRUE(rdser.Eof()); } diff --git a/cpp_src/gtests/tests/fixtures/item_move_semantics_api.h b/cpp_src/gtests/tests/fixtures/item_move_semantics_api.h index 1c0434473..ba10e1784 100644 --- a/cpp_src/gtests/tests/fixtures/item_move_semantics_api.h +++ b/cpp_src/gtests/tests/fixtures/item_move_semantics_api.h @@ -6,9 +6,6 @@ #include "gason/gason.h" #include "reindexer_api.h" -using reindexer::Item; -using reindexer::ItemImpl; - class ItemMoveSemanticsApi : public ReindexerApi { protected: const std::string pkField = "bookid"; @@ -18,14 +15,22 @@ class ItemMoveSemanticsApi : public ReindexerApi { void SetUp() override { ReindexerApi::SetUp(); - 
rt.reindexer->OpenNamespace(default_namespace, StorageOpts().Enabled(false)); - rt.reindexer->AddIndex(default_namespace, {"bookid", "hash", "int", IndexOpts().PK()}); - rt.reindexer->AddIndex(default_namespace, {"title", "text", "string", IndexOpts()}); - rt.reindexer->AddIndex(default_namespace, {"pages", "hash", "int", IndexOpts().PK()}); - rt.reindexer->AddIndex(default_namespace, {"price", "hash", "int", IndexOpts().PK()}); - rt.reindexer->AddIndex(default_namespace, {"genreid_fk", "hash", "int", IndexOpts().PK()}); - rt.reindexer->AddIndex(default_namespace, {"authorid_fk", "hash", "int", IndexOpts().PK()}); - rt.reindexer->Commit(default_namespace); + auto err = rt.reindexer->OpenNamespace(default_namespace, StorageOpts().Enabled(false)); + ASSERT_TRUE(err.ok()) << err.what(); + err = rt.reindexer->AddIndex(default_namespace, {"bookid", "hash", "int", IndexOpts().PK()}); + ASSERT_TRUE(err.ok()) << err.what(); + err = rt.reindexer->AddIndex(default_namespace, {"title", "text", "string", IndexOpts()}); + ASSERT_TRUE(err.ok()) << err.what(); + err = rt.reindexer->AddIndex(default_namespace, {"pages", "hash", "int", IndexOpts()}); + ASSERT_TRUE(err.ok()) << err.what(); + err = rt.reindexer->AddIndex(default_namespace, {"price", "hash", "int", IndexOpts()}); + ASSERT_TRUE(err.ok()) << err.what(); + err = rt.reindexer->AddIndex(default_namespace, {"genreid_fk", "hash", "int", IndexOpts()}); + ASSERT_TRUE(err.ok()) << err.what(); + err = rt.reindexer->AddIndex(default_namespace, {"authorid_fk", "hash", "int", IndexOpts()}); + ASSERT_TRUE(err.ok()) << err.what(); + err = rt.reindexer->Commit(default_namespace); + ASSERT_TRUE(err.ok()) << err.what(); } void prepareItems() { @@ -48,7 +53,8 @@ class ItemMoveSemanticsApi : public ReindexerApi { ASSERT_TRUE(err.ok()) << err.what(); ASSERT_NO_THROW(gason::JsonParser().Parse(item.GetJSON())); } - rt.reindexer->Commit(default_namespace); + const auto err = rt.reindexer->Commit(default_namespace); + ASSERT_TRUE(err.ok()) << err.what(); } Item getItemById(int id) { diff --git a/cpp_src/gtests/tests/fixtures/join_on_conditions_api.h b/cpp_src/gtests/tests/fixtures/join_on_conditions_api.h index 6326c8faf..8834ba94e 100644 --- a/cpp_src/gtests/tests/fixtures/join_on_conditions_api.h +++ b/cpp_src/gtests/tests/fixtures/join_on_conditions_api.h @@ -23,7 +23,8 @@ class JoinOnConditionsApi : public JoinSelectsApi { builder.End(); err = item.FromJSON(ser.c_str()); ASSERT_TRUE(err.ok()) << err.what(); - rt.reindexer->Insert(leftNs, item); + err = rt.reindexer->Insert(leftNs, item); + ASSERT_TRUE(err.ok()) << err.what(); } for (unsigned int i = 0; i < rightNsData.size(); i++) { @@ -40,7 +41,8 @@ class JoinOnConditionsApi : public JoinSelectsApi { builder.End(); err = item.FromJSON(ser.c_str()); ASSERT_TRUE(err.ok()) << err.what(); - rt.reindexer->Insert(rightNs, item); + err = rt.reindexer->Insert(rightNs, item); + ASSERT_TRUE(err.ok()) << err.what(); } } diff --git a/cpp_src/gtests/tests/fixtures/queries_api.cc b/cpp_src/gtests/tests/fixtures/queries_api.cc index 9047a9627..a1be92c4c 100644 --- a/cpp_src/gtests/tests/fixtures/queries_api.cc +++ b/cpp_src/gtests/tests/fixtures/queries_api.cc @@ -281,6 +281,7 @@ void QueriesApi::initConditionsNs() { ASSERT_TRUE(err.ok()) << err.what(); err = rt.reindexer->AddIndex(conditionsNs, {kFieldNameId, "hash", "int", IndexOpts{}.PK()}); ASSERT_TRUE(err.ok()) << err.what(); + addIndexFields(conditionsNs, kFieldNameId, {{kFieldNameId, reindexer::KeyValueType::Int{}}}); for (const auto& fit : fieldIndexTypes) { for (const 
auto& it : fit.indexTypes) { for (const bool isArray : {true, false}) { @@ -291,8 +292,8 @@ void QueriesApi::initConditionsNs() { const std::string fieldType{fit.fieldType.Name()}; const std::string indexName{createIndexName(fieldType, it, isArray, isSparse)}; err = rt.reindexer->AddIndex(conditionsNs, {indexName, it, fieldType, IndexOpts{}.Array(isArray).Sparse(isSparse)}); - addIndexFields(conditionsNs, indexName, {{indexName, fit.fieldType}}); ASSERT_TRUE(err.ok()) << err.what(); + addIndexFields(conditionsNs, indexName, {{indexName, fit.fieldType}}); } } } @@ -300,6 +301,27 @@ void QueriesApi::initConditionsNs() { setPkFields(conditionsNs, {kFieldNameId}); } +void QueriesApi::initUUIDNs() { + const auto err = rt.reindexer->OpenNamespace(uuidNs); + ASSERT_TRUE(err.ok()) << err.what(); + DefineNamespaceDataset( + uuidNs, + { + IndexDeclaration{kFieldNameId, "hash", "int", IndexOpts{}.PK(), 0}, + IndexDeclaration{kFieldNameUuid, "hash", "uuid", IndexOpts{}, 0}, + /*IndexDeclaration{kFieldNameUuidSparse, "hash", "uuid", IndexOpts{}.Sparse(), 0}, // TODO uncomment this #1470 + IndexDeclaration{kFieldNameUuidNotIndex2, "hash", "uuid", IndexOpts{}, 0}, + IndexDeclaration{kFieldNameUuidNotIndex3, "hash", "uuid", IndexOpts{}.Sparse(), 0},*/ + IndexDeclaration{kFieldNameUuidArr, "hash", "uuid", IndexOpts{}.Array(), 0}, + // IndexDeclaration{kFieldNameUuidArrSparse, "hash", "uuid", IndexOpts{}.Array().Sparse(), 0} // TODO uncomment this #1470 + }); + for (const auto& idx : + {kFieldNameUuid, kFieldNameUuidArr /*, kFieldNameUuidSparse, kFieldNameUuidArrSparse*/}) { // TODO uncomment this #1470 + addIndexFields(uuidNs, idx, {{idx, reindexer::KeyValueType::Uuid{}}}); + } + setPkFields(uuidNs, {kFieldNameId}); +} + static reindexer::Variant createRandValue(int id, reindexer::KeyValueType fieldType) { using namespace reindexer; return fieldType.EvaluateOneOf(overloaded{ @@ -376,41 +398,19 @@ static reindexer::VariantArray createRandArrValues(size_t min, size_t max, int i } void QueriesApi::checkAllConditions(const std::string& fieldName, reindexer::KeyValueType fieldType, NullAllowed nullAllowed) { - for (const auto cond : {CondEq, CondSet, CondAllSet, CondLt, CondLe, CondGt, CondGe, CondRange, CondAny, CondEmpty, CondLike}) { - size_t min = 0, max = rand() % kMaxArraySize; - switch (cond) { - case CondEq: - case CondSet: - case CondAllSet: - break; - case CondLike: - if (!fieldType.Is()) { - continue; - } - [[fallthrough]]; - case CondLt: - case CondLe: - case CondGt: - case CondGe: - min = max = 1; - break; - case CondRange: - min = max = 2; - break; - case CondAny: - case CondEmpty: - if (nullAllowed == NullAllowed::No) { - continue; - } - min = max = 0; - break; - case CondDWithin: // TODO #1352 - assert(0); + for (const auto cond : {CondEq, CondSet, CondAllSet, CondLt, CondLe, CondGt, CondGe, CondRange, CondAny, CondEmpty, + CondLike}) { // TODO CondDWithin #1352 + if (cond == CondLike && !fieldType.Is()) { + continue; } + if (nullAllowed == NullAllowed::No && (cond == CondAny || cond == CondEmpty)) { + continue; + } + const auto argsCount = minMaxArgs(cond, 20); for (size_t i = 0; i < 3; ++i) { - ExecuteAndVerify( - reindexer::Query{conditionsNs}.Where(fieldName, cond, createRandArrValues(min, max, rand() % conditionsNsSize, fieldType))); - if (min <= 1 && max >= 1) { + ExecuteAndVerify(reindexer::Query{conditionsNs}.Where( + fieldName, cond, createRandArrValues(argsCount.min, argsCount.max, rand() % conditionsNsSize, fieldType))); + if (argsCount.min <= 1 && argsCount.max >= 1) { 
ExecuteAndVerify( reindexer::Query{conditionsNs}.Where(fieldName, cond, createRandValue(rand() % conditionsNsSize, fieldType))); } @@ -439,3 +439,99 @@ void QueriesApi::CheckConditions() { checkAllConditions(fieldType + "_array", fit.fieldType, NullAllowed::Yes); } } + +void QueriesApi::FillUUIDNs() { + static size_t lastId = 0; + reindexer::WrSerializer ser; + for (size_t i = lastId; i < uuidNsSize + lastId; ++i) { + Item item = rt.reindexer->NewItem(uuidNs); + ASSERT_TRUE(item.Status().ok()) << item.Status().what(); + if (rand() % 2) { + ser.Reset(); + { + reindexer::JsonBuilder json{ser}; + json.Put(kFieldNameId, i); + json.Put(kFieldNameUuid, randStrUuid()); + /*if (rand() % 2) { + json.Put(kFieldNameUuidSparse, randStrUuid()); // TODO uncomment this #1470 + }*/ + { + auto arr = json.Array(kFieldNameUuidArr); + for (size_t j = 0, s = rand() % 10; j < s; ++j) { + arr.Put({}, randStrUuid()); + } + } + /*if (rand() % 2) { + auto arr = json.Array(kFieldNameUuidArrSparse); // TODO uncomment this #1470 + for (size_t j = 0, s = rand() % 10; j < s; ++j) { + arr.Put({}, randStrUuid()); + } + }*/ + if (rand() % 2) { + json.Put(kFieldNameUuidNotIndex, randStrUuid()); + } + /*json.Put(kFieldNameUuidNotIndex2, randStrUuid()); // TODO uncomment this #1470 + if (rand() % 2) { + json.Put(kFieldNameUuidNotIndex3, randStrUuid()); + }*/ + if (rand() % 2) { + json.Put(kFieldNameRndString, RandString()); + } + } + const auto err = item.FromJSON(ser.Slice()); + ASSERT_TRUE(err.ok()) << err.what(); + } else { + item[kFieldNameId] = int(i); + if (rand() % 2) { + item[kFieldNameUuid] = randUuid(); + } else { + item[kFieldNameUuid] = randStrUuid(); + } + /*if (rand() % 2) { + item[kFieldNameUuidSparse] = randUuid(); // TODO uncomment this #1470 + }*/ + item[kFieldNameUuidArr] = randHeterogeneousUuidArray(0, 20); + /*if (rand() % 2) { + item[kFieldNameUuidArrSparse] = randHeterogeneousUuidArray(0, 20); // TODO uncomment this #1470 + } + if (rand() % 2) { + item[kFieldNameUuidNotIndex2] = randUuid(); + } else { + item[kFieldNameUuidNotIndex2] = randStrUuid(); + } + if (rand() % 2) { + if (rand() % 2) { + item[kFieldNameUuidNotIndex3] = randUuid(); + } else { + item[kFieldNameUuidNotIndex3] = randStrUuid(); + } + }*/ + } + ASSERT_TRUE(item.Status().ok()) << item.Status().what(); + Upsert(uuidNs, item); + saveItem(std::move(item), uuidNs); + } + const auto err = Commit(uuidNs); + ASSERT_TRUE(err.ok()) << err.what(); + lastId += uuidNsSize; +} + +void QueriesApi::CheckUUIDQueries() { + for (size_t i = 0; i < 10; ++i) { + for (const auto& field : { + kFieldNameUuid, kFieldNameUuidArr, kFieldNameUuidNotIndex, kFieldNameRndString /*, + kFieldNameUuidSparse, kFieldNameUuidArrSparse, kFieldNameUuidNotIndex2, kFieldNameUuidNotIndex3*/ + }) { // TODO uncomment this #1470 + for (auto cond : {CondEq, CondLe, CondLt, CondSet, CondGe, CondGt, CondAllSet, CondRange}) { + const auto argsCount = minMaxArgs(cond, 20); + if (argsCount.min <= 1 && argsCount.max >= 1) { + ExecuteAndVerify(Query(uuidNs).Where(field, cond, randUuid())); + ExecuteAndVerify(Query(uuidNs).Where(field, cond, randStrUuid())); + } + ExecuteAndVerify(Query(uuidNs).Where(field, cond, randUuidArray(argsCount.min, argsCount.max))); + ExecuteAndVerify(Query(uuidNs).Where(field, cond, randStrUuidArray(argsCount.min, argsCount.max))); + ExecuteAndVerify(Query(uuidNs).Where(field, cond, randHeterogeneousUuidArray(argsCount.min, argsCount.max))); + } + } + } +} diff --git a/cpp_src/gtests/tests/fixtures/queries_api.h b/cpp_src/gtests/tests/fixtures/queries_api.h 
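Note on the checkAllConditions() and CheckUUIDQueries() hunks above: the per-condition min/max switch is replaced by a single minMaxArgs(cond, 20) call. That helper is defined elsewhere in the test sources and is not part of this diff; the sketch below (struct name, signature and return type are assumptions) only restates the mapping the removed switch encoded, to make the intent of the refactor easier to follow.

```cpp
#include <cstddef>
#include "core/type_consts.h"  // CondType, CondEq, CondRange, ...

// Hypothetical stand-in for the minMaxArgs() helper used by the tests above.
struct ArgsCountSketch {
	size_t min;
	size_t max;
};

inline ArgsCountSketch minMaxArgsSketch(CondType cond, size_t maxSetArgs) {
	switch (cond) {
		case CondEq:
		case CondSet:
		case CondAllSet:
			return {0, maxSetArgs};  // set-style conditions accept any number of values
		case CondLt:
		case CondLe:
		case CondGt:
		case CondGe:
		case CondLike:
			return {1, 1};  // comparisons and LIKE take exactly one value
		case CondRange:
			return {2, 2};  // range takes a lower and an upper bound
		case CondAny:
		case CondEmpty:
		default:
			return {0, 0};  // existence checks take no values
	}
}
```

With the argument counts centralised, the rewritten loops only keep the skips that genuinely depend on context: CondLike on non-string fields and CondAny/CondEmpty when NullAllowed::No.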
index 20e4aa9af..b8306b844 100644 --- a/cpp_src/gtests/tests/fixtures/queries_api.h +++ b/cpp_src/gtests/tests/fixtures/queries_api.h @@ -9,14 +9,11 @@ #include #include "core/cjson/jsonbuilder.h" #include "core/keyvalue/geometry.h" -#include "core/nsselecter/sortexpression.h" #include "core/queryresults/joinresults.h" -#include "core/type_consts_helpers.h" #include "gtests/tools.h" #include "queries_verifier.h" #include "reindexer_api.h" #include "tools/random.h" -#include "tools/stringstools.h" class QueriesApi : public ReindexerApi, public QueriesVerifier { public: @@ -66,12 +63,33 @@ class QueriesApi : public ReindexerApi, public QueriesVerifier { IndexDeclaration{kCompositeFieldUuidName, "hash", "composite", IndexOpts{}, 0}, IndexDeclaration{kFieldNameYearSparse, "hash", "string", IndexOpts{}.Sparse(), 0}, }); + addIndexFields(default_namespace, kFieldNameId, {{kFieldNameId, reindexer::KeyValueType::Int{}}}); + addIndexFields(default_namespace, kFieldNameGenre, {{kFieldNameGenre, reindexer::KeyValueType::Int{}}}); + addIndexFields(default_namespace, kFieldNameYear, {{kFieldNameYear, reindexer::KeyValueType::Int{}}}); + addIndexFields(default_namespace, kFieldNamePackages, {{kFieldNamePackages, reindexer::KeyValueType::Int{}}}); + addIndexFields(default_namespace, kFieldNameName, {{kFieldNameName, reindexer::KeyValueType::String{}}}); + addIndexFields(default_namespace, kFieldNameCountries, {{kFieldNameCountries, reindexer::KeyValueType::String{}}}); + addIndexFields(default_namespace, kFieldNameAge, {{kFieldNameAge, reindexer::KeyValueType::Int{}}}); + addIndexFields(default_namespace, kFieldNameDescription, {{kFieldNameDescription, reindexer::KeyValueType::String{}}}); + addIndexFields(default_namespace, kFieldNameRate, {{kFieldNameRate, reindexer::KeyValueType::Double{}}}); + addIndexFields(default_namespace, kFieldNameIsDeleted, {{kFieldNameIsDeleted, reindexer::KeyValueType::Bool{}}}); + addIndexFields(default_namespace, kFieldNameActor, {{kFieldNameActor, reindexer::KeyValueType::String{}}}); + addIndexFields(default_namespace, kFieldNamePriceId, {{kFieldNamePriceId, reindexer::KeyValueType::Int{}}}); + addIndexFields(default_namespace, kFieldNameLocation, {{kFieldNameLocation, reindexer::KeyValueType::String{}}}); + addIndexFields(default_namespace, kFieldNameEndTime, {{kFieldNameEndTime, reindexer::KeyValueType::Int{}}}); + addIndexFields(default_namespace, kFieldNameStartTime, {{kFieldNameStartTime, reindexer::KeyValueType::Int{}}}); + addIndexFields(default_namespace, kFieldNameBtreeIdsets, {{kFieldNameBtreeIdsets, reindexer::KeyValueType::Int{}}}); + addIndexFields(default_namespace, kFieldNameTemp, {{kFieldNameTemp, reindexer::KeyValueType::String{}}}); + addIndexFields(default_namespace, kFieldNameNumeric, {{kFieldNameNumeric, reindexer::KeyValueType::String{}}}); + addIndexFields(default_namespace, kFieldNameUuid, {{kFieldNameUuid, reindexer::KeyValueType::Uuid{}}}); + addIndexFields(default_namespace, kFieldNameUuidArr, {{kFieldNameUuidArr, reindexer::KeyValueType::Uuid{}}}); addIndexFields(default_namespace, kCompositeFieldIdTemp, {{kFieldNameId, reindexer::KeyValueType::Int{}}, {kFieldNameTemp, reindexer::KeyValueType::String{}}}); addIndexFields(default_namespace, kCompositeFieldAgeGenre, {{kFieldNameAge, reindexer::KeyValueType::Int{}}, {kFieldNameGenre, reindexer::KeyValueType::Int{}}}); addIndexFields(default_namespace, kCompositeFieldUuidName, {{kFieldNameUuid, reindexer::KeyValueType::Uuid{}}, {kFieldNameName, reindexer::KeyValueType::String{}}}); + 
addIndexFields(default_namespace, kFieldNameYearSparse, {{kFieldNameYearSparse, reindexer::KeyValueType::String{}}}); err = rt.reindexer->OpenNamespace(joinNs); ASSERT_TRUE(err.ok()) << err.what(); @@ -81,6 +99,12 @@ class QueriesApi : public ReindexerApi, public QueriesVerifier { IndexDeclaration{kFieldNameName, "tree", "string", IndexOpts(), 0}, IndexDeclaration{kFieldNameDescription, "text", "string", IndexOpts{}, 0}, IndexDeclaration{kFieldNameYearSparse, "hash", "string", IndexOpts().Sparse(), 0}}); + addIndexFields(joinNs, kFieldNameId, {{kFieldNameId, reindexer::KeyValueType::Int{}}}); + addIndexFields(joinNs, kFieldNameYear, {{kFieldNameYear, reindexer::KeyValueType::Int{}}}); + addIndexFields(joinNs, kFieldNameAge, {{kFieldNameAge, reindexer::KeyValueType::Int{}}}); + addIndexFields(joinNs, kFieldNameName, {{kFieldNameName, reindexer::KeyValueType::String{}}}); + addIndexFields(joinNs, kFieldNameDescription, {{kFieldNameDescription, reindexer::KeyValueType::String{}}}); + addIndexFields(joinNs, kFieldNameYearSparse, {{kFieldNameYearSparse, reindexer::KeyValueType::String{}}}); err = rt.reindexer->OpenNamespace(testSimpleNs); ASSERT_TRUE(err.ok()) << err.what(); @@ -90,6 +114,10 @@ class QueriesApi : public ReindexerApi, public QueriesVerifier { IndexDeclaration{kFieldNameName, "hash", "string", IndexOpts(), 0}, IndexDeclaration{kFieldNamePhone, "hash", "string", IndexOpts(), 0}, }); + addIndexFields(testSimpleNs, kFieldNameId, {{kFieldNameId, reindexer::KeyValueType::Int{}}}); + addIndexFields(testSimpleNs, kFieldNameYear, {{kFieldNameYear, reindexer::KeyValueType::Int{}}}); + addIndexFields(testSimpleNs, kFieldNameName, {{kFieldNameName, reindexer::KeyValueType::String{}}}); + addIndexFields(testSimpleNs, kFieldNamePhone, {{kFieldNamePhone, reindexer::KeyValueType::String{}}}); err = rt.reindexer->OpenNamespace(compositeIndexesNs); ASSERT_TRUE(err.ok()) << err.what(); @@ -105,6 +133,12 @@ class QueriesApi : public ReindexerApi, public QueriesVerifier { IndexDeclaration{kCompositeFieldPriceTitle, "hash", "composite", IndexOpts(), 0}, IndexDeclaration{kCompositeFieldPagesTitle, "hash", "composite", IndexOpts(), 0}, IndexDeclaration{kCompositeFieldBookidBookid2, "hash", "composite", IndexOpts().PK(), 0}}); + addIndexFields(compositeIndexesNs, kFieldNameBookid, {{kFieldNameBookid, reindexer::KeyValueType::Int{}}}); + addIndexFields(compositeIndexesNs, kFieldNameBookid2, {{kFieldNameBookid2, reindexer::KeyValueType::Int{}}}); + addIndexFields(compositeIndexesNs, kFieldNameTitle, {{kFieldNameTitle, reindexer::KeyValueType::String{}}}); + addIndexFields(compositeIndexesNs, kFieldNamePages, {{kFieldNamePages, reindexer::KeyValueType::Int{}}}); + addIndexFields(compositeIndexesNs, kFieldNamePrice, {{kFieldNamePrice, reindexer::KeyValueType::Int{}}}); + addIndexFields(compositeIndexesNs, kFieldNameName, {{kFieldNameName, reindexer::KeyValueType::String{}}}); addIndexFields(compositeIndexesNs, kCompositeFieldPricePages, {{kFieldNamePrice, reindexer::KeyValueType::Int{}}, {kFieldNamePages, reindexer::KeyValueType::Int{}}}); addIndexFields(compositeIndexesNs, kCompositeFieldTitleName, @@ -127,12 +161,22 @@ class QueriesApi : public ReindexerApi, public QueriesVerifier { IndexDeclaration{kFieldNameColumnString, "-", "string", IndexOpts(), 0}, IndexDeclaration{kFieldNameColumnFullText, "text", "string", IndexOpts().SetConfig(R"xxx({"stemmers":[]})xxx"), 0}, IndexDeclaration{kFieldNameColumnStringNumeric, "-", "string", IndexOpts().SetCollateMode(CollateNumeric), 0}}); + 
addIndexFields(comparatorsNs, kFieldNameId, {{kFieldNameId, reindexer::KeyValueType::Int{}}}); + addIndexFields(comparatorsNs, kFieldNameColumnInt, {{kFieldNameColumnInt, reindexer::KeyValueType::Int{}}}); + addIndexFields(comparatorsNs, kFieldNameColumnInt64, {{kFieldNameColumnInt64, reindexer::KeyValueType::Int64{}}}); + addIndexFields(comparatorsNs, kFieldNameColumnDouble, {{kFieldNameColumnDouble, reindexer::KeyValueType::Double{}}}); + addIndexFields(comparatorsNs, kFieldNameColumnString, {{kFieldNameColumnString, reindexer::KeyValueType::String{}}}); + addIndexFields(comparatorsNs, kFieldNameColumnFullText, {{kFieldNameColumnFullText, reindexer::KeyValueType::String{}}}); + addIndexFields(comparatorsNs, kFieldNameColumnStringNumeric, {{kFieldNameColumnStringNumeric, reindexer::KeyValueType::String{}}}); err = rt.reindexer->OpenNamespace(forcedSortOffsetNs); ASSERT_TRUE(err.ok()) << err.what(); DefineNamespaceDataset(forcedSortOffsetNs, {IndexDeclaration{kFieldNameId, "hash", "int", IndexOpts().PK(), 0}, IndexDeclaration{kFieldNameColumnHash, "hash", "int", IndexOpts(), 0}, IndexDeclaration{kFieldNameColumnTree, "tree", "int", IndexOpts(), 0}}); + addIndexFields(forcedSortOffsetNs, kFieldNameId, {{kFieldNameId, reindexer::KeyValueType::Int{}}}); + addIndexFields(forcedSortOffsetNs, kFieldNameColumnHash, {{kFieldNameColumnHash, reindexer::KeyValueType::Int{}}}); + addIndexFields(forcedSortOffsetNs, kFieldNameColumnTree, {{kFieldNameColumnTree, reindexer::KeyValueType::Int{}}}); err = rt.reindexer->OpenNamespace(geomNs); ASSERT_TRUE(err.ok()) << err.what(); @@ -142,12 +186,20 @@ class QueriesApi : public ReindexerApi, public QueriesVerifier { IndexDeclaration{kFieldNamePointLinearRTree, "rtree", "point", IndexOpts{}.RTreeType(IndexOpts::Linear), 0}, IndexDeclaration{kFieldNamePointGreeneRTree, "rtree", "point", IndexOpts{}.RTreeType(IndexOpts::Greene), 0}, IndexDeclaration{kFieldNamePointRStarRTree, "rtree", "point", IndexOpts{}.RTreeType(IndexOpts::RStar), 0}}); + addIndexFields(geomNs, kFieldNameId, {{kFieldNameId, reindexer::KeyValueType::Int{}}}); + addIndexFields(geomNs, kFieldNamePointQuadraticRTree, {{kFieldNamePointQuadraticRTree, reindexer::KeyValueType::Double{}}}); + addIndexFields(geomNs, kFieldNamePointLinearRTree, {{kFieldNamePointLinearRTree, reindexer::KeyValueType::Double{}}}); + addIndexFields(geomNs, kFieldNamePointGreeneRTree, {{kFieldNamePointGreeneRTree, reindexer::KeyValueType::Double{}}}); + addIndexFields(geomNs, kFieldNamePointRStarRTree, {{kFieldNamePointRStarRTree, reindexer::KeyValueType::Double{}}}); err = rt.reindexer->OpenNamespace(btreeIdxOptNs); ASSERT_TRUE(err.ok()) << err.what(); DefineNamespaceDataset(btreeIdxOptNs, {IndexDeclaration{kFieldNameId, "tree", "int", IndexOpts().PK(), 0}, IndexDeclaration{kFieldNameStartTime, "tree", "int", IndexOpts(), 0}}); + addIndexFields(btreeIdxOptNs, kFieldNameId, {{kFieldNameId, reindexer::KeyValueType::Int{}}}); + addIndexFields(btreeIdxOptNs, kFieldNameStartTime, {{kFieldNameStartTime, reindexer::KeyValueType::Int{}}}); initConditionsNs(); + initUUIDNs(); } void initConditionsNs(); @@ -155,6 +207,9 @@ class QueriesApi : public ReindexerApi, public QueriesVerifier { void CheckConditions(); enum class NullAllowed : bool { Yes = true, No = false }; void checkAllConditions(const std::string& fieldName, reindexer::KeyValueType fieldType, NullAllowed); + void initUUIDNs(); + void FillUUIDNs(); + void CheckUUIDQueries(); template void ExecuteAndVerify(const Query& query, T... 
args) { @@ -515,8 +570,9 @@ class QueriesApi : public ReindexerApi, public QueriesVerifier { tr.Insert(std::move(item)); } QueryResults res; - rt.reindexer->CommitTransaction(tr, res); - const auto err = Commit(default_namespace); + auto err = rt.reindexer->CommitTransaction(tr, res); + ASSERT_TRUE(err.ok()) << err.what(); + err = Commit(default_namespace); ASSERT_TRUE(err.ok()) << err.what(); } @@ -699,8 +755,6 @@ class QueriesApi : public ReindexerApi, public QueriesVerifier { static const std::vector distincts = {"", kFieldNameYear, kFieldNameRate}; static const std::vector sortOrders = {true, false}; - static const std::string compositeIndexName(kFieldNameAge + compositePlus + kFieldNameGenre); - for (const bool sortOrder : sortOrders) { for (const auto& sortIdx : sortIdxs) { for (const std::string& distinct : distincts) { @@ -870,7 +924,7 @@ class QueriesApi : public ReindexerApi, public QueriesVerifier { // end of check substituteCompositIndexes ExecuteAndVerify(Query(default_namespace) - .Where(kFieldNamePackages, CondEmpty, 0) + .Where(kFieldNamePackages, CondEmpty, {}) .Distinct(distinct.c_str()) .Sort(sortIdx, sortOrder)); @@ -900,7 +954,7 @@ class QueriesApi : public ReindexerApi, public QueriesVerifier { ExecuteAndVerify(Query(default_namespace).Sort(kFieldNameGenre, true, {10, 20, 30})); ExecuteAndVerify(Query(default_namespace) - .Where(kFieldNamePackages, CondAny, 0) + .Where(kFieldNamePackages, CondAny, {}) .Distinct(distinct.c_str()) .Sort(sortIdx, sortOrder)); @@ -966,7 +1020,7 @@ class QueriesApi : public ReindexerApi, public QueriesVerifier { .Where(kFieldNameGenre, CondEq, 3) .Where(kFieldNamePackages, CondSet, RandIntVector(5, 10000, 50)) .Or() - .Where(kFieldNamePackages, CondEmpty, 0) + .Where(kFieldNamePackages, CondEmpty, {}) .Debug(LogTrace)); ExecuteAndVerify(Query(default_namespace) @@ -976,7 +1030,7 @@ class QueriesApi : public ReindexerApi, public QueriesVerifier { .Where(kFieldNameGenre, CondSet, {5, 1, 7}) .Where(kFieldNameYear, CondLt, 2010) .Or() - .Where(kFieldNamePackages, CondAny, 0) + .Where(kFieldNamePackages, CondAny, {}) .Where(kFieldNamePackages, CondSet, RandIntVector(5, 10000, 50)) .Debug(LogTrace)); @@ -1112,7 +1166,7 @@ class QueriesApi : public ReindexerApi, public QueriesVerifier { .ReqTotal() .Distinct(distinct) .Sort(sortIdx, sortOrder) - .WhereComposite(compositeIndexName.c_str(), CondLe, {{Variant(27), Variant(10000)}})); + .WhereComposite(kCompositeFieldAgeGenre, CondLe, {{Variant(27), Variant(10000)}})); ExecuteAndVerify(Query(default_namespace) .ReqTotal() @@ -1125,7 +1179,7 @@ class QueriesApi : public ReindexerApi, public QueriesVerifier { .ReqTotal() .Distinct(distinct) .Sort(sortIdx, sortOrder) - .WhereComposite(compositeIndexName.c_str(), CondEq, {{Variant(rand() % 10), Variant(rand() % 50)}})); + .WhereComposite(kCompositeFieldAgeGenre, CondEq, {{Variant(rand() % 10), Variant(rand() % 50)}})); ExecuteAndVerify(Query(default_namespace) .InnerJoin(kFieldNameYear, kFieldNameYear, CondEq, Query(joinNs)) @@ -1461,83 +1515,64 @@ class QueriesApi : public ReindexerApi, public QueriesVerifier { .CloseBracket() .Distinct(distinct)); - for (CondType cond : {CondEq, CondSet, CondLt, CondLe, CondGt, CondGe}) { - const bool multyArgCond = cond == CondEq || cond == CondSet; - ExecuteAndVerify(Query(default_namespace) - .Where(kFieldNameUuid, cond, randUuid()) - .Distinct(distinct.c_str()) - .Sort(sortIdx, sortOrder)); - - ExecuteAndVerify(Query(default_namespace) - .Where(kFieldNameUuid, cond, randStrUuid()) - .Distinct(distinct.c_str()) - 
.Sort(sortIdx, sortOrder)); - - ExecuteAndVerify(Query(default_namespace) - .Where(kFieldNameUuid, cond, - multyArgCond ? VariantArray::Create(randUuid(), randStrUuid(), randUuid(), - randStrUuid(), randUuid()) - : VariantArray::Create(randUuid())) - .Distinct(distinct.c_str()) - .Sort(sortIdx, sortOrder)); - - ExecuteAndVerify(Query(default_namespace) - .Where(kFieldNameUuidArr, cond, randUuid()) - .Distinct(distinct.c_str()) - .Sort(sortIdx, sortOrder)); + for (CondType cond : {CondEq, CondSet, CondLt, CondLe, CondGt, CondGe, CondRange}) { + const auto argsCount = minMaxArgs(cond, 20); + if (argsCount.min <= 1 && argsCount.max >= 1) { + ExecuteAndVerify(Query(default_namespace) + .Where(kFieldNameUuid, cond, randUuid()) + .Distinct(distinct.c_str()) + .Sort(sortIdx, sortOrder)); + + ExecuteAndVerify(Query(default_namespace) + .Where(kFieldNameUuid, cond, randStrUuid()) + .Distinct(distinct.c_str()) + .Sort(sortIdx, sortOrder)); + + ExecuteAndVerify(Query(default_namespace) + .Where(kFieldNameUuidArr, cond, randUuid()) + .Distinct(distinct.c_str()) + .Sort(sortIdx, sortOrder)); + + ExecuteAndVerify(Query(default_namespace) + .Where(kFieldNameUuidArr, cond, randStrUuid()) + .Distinct(distinct.c_str()) + .Sort(sortIdx, sortOrder)); + } ExecuteAndVerify(Query(default_namespace) - .Where(kFieldNameUuidArr, cond, randStrUuid()) + .Where(kFieldNameUuid, cond, randHeterogeneousUuidArray(argsCount.min, argsCount.max)) .Distinct(distinct.c_str()) .Sort(sortIdx, sortOrder)); ExecuteAndVerify(Query(default_namespace) - .Where(kFieldNameUuidArr, cond, - multyArgCond ? VariantArray::Create(randUuid(), randStrUuid(), randUuid(), - randStrUuid(), randUuid(), randStrUuid()) - : VariantArray::Create(randUuid())) + .Where(kFieldNameUuidArr, cond, randHeterogeneousUuidArray(argsCount.min, argsCount.max)) .Distinct(distinct.c_str()) .Sort(sortIdx, sortOrder)); + std::vector compositeKeyValues; + VariantArray hetUuidArray = randHeterogeneousUuidArray(argsCount.min, argsCount.max); + compositeKeyValues.reserve(hetUuidArray.size()); + std::transform(std::make_move_iterator(hetUuidArray.begin()), std::make_move_iterator(hetUuidArray.end()), + std::back_inserter(compositeKeyValues), + [this](Variant&& uuid) { return VariantArray::Create(std::move(uuid), RandString()); }); ExecuteAndVerify(Query(default_namespace) - .WhereComposite(kFieldNameUuid + compositePlus + kFieldNameName, cond, - multyArgCond - ? 
std::vector{VariantArray::Create(randUuid(), RandString()), - VariantArray::Create(randStrUuid(), RandString()), - VariantArray::Create(randUuid(), RandString()), - VariantArray::Create(randStrUuid(), RandString()), - VariantArray::Create(randUuid(), RandString()), - VariantArray::Create(randStrUuid(), RandString()), - VariantArray::Create(randUuid(), RandString()), - VariantArray::Create(randStrUuid(), RandString()), - VariantArray::Create(randUuid(), RandString()), - VariantArray::Create(randStrUuid(), RandString()), - VariantArray::Create(randUuid(), RandString()), - VariantArray::Create(randStrUuid(), RandString()), - VariantArray::Create(randUuid(), RandString()), - VariantArray::Create(randStrUuid(), RandString()), - VariantArray::Create(randUuid(), RandString()), - VariantArray::Create(randStrUuid(), RandString()), - VariantArray::Create(randUuid(), RandString()), - VariantArray::Create(randStrUuid(), RandString()), - VariantArray::Create(randUuid(), RandString())} - : std::vector{VariantArray::Create(randUuid(), RandString())}) + .WhereComposite(kCompositeFieldUuidName, cond, compositeKeyValues) .Distinct(distinct.c_str()) .Sort(sortIdx, sortOrder)); } ExecuteAndVerify(Query(default_namespace) - .Where(kFieldNameUuid, CondRange, {nilUuid(), randUuid()}) + .Where(kFieldNameUuid, CondRange, randHeterogeneousUuidArray(2, 2)) .Distinct(distinct.c_str()) .Sort(sortIdx, sortOrder)); ExecuteAndVerify(Query(default_namespace) - .Where(kFieldNameUuidArr, CondRange, {nilUuid(), randUuid()}) + .Where(kFieldNameUuidArr, CondRange, randHeterogeneousUuidArray(2, 2)) .Distinct(distinct.c_str()) .Sort(sortIdx, sortOrder)); ExecuteAndVerify(Query(default_namespace) - .WhereComposite(kFieldNameUuid + compositePlus + kFieldNameName, CondRange, + .WhereComposite(kCompositeFieldUuidName, CondRange, {VariantArray::Create(nilUuid(), RandString()), VariantArray::Create(randUuid(), RandString())}) .Distinct(distinct.c_str()) @@ -2178,7 +2213,13 @@ class QueriesApi : public ReindexerApi, public QueriesVerifier { const char* kFieldNamePages = "pages"; const char* kFieldNamePrice = "price"; const char* kFieldNameUuid = "uuid"; + const char* kFieldNameUuidSparse = "uuid_sparse"; const char* kFieldNameUuidArr = "uuid_arr"; + const char* kFieldNameUuidArrSparse = "uuid_arr_sparse"; + const char* kFieldNameUuidNotIndex = "uuid_not_index"; + const char* kFieldNameUuidNotIndex2 = "uuid_not_index_2"; + const char* kFieldNameUuidNotIndex3 = "uuid_not_index_3"; + const char* kFieldNameRndString = "rndString"; const char* kFieldNameBtreeIdsets = "btree_idsets"; const char* kFieldNamePointQuadraticRTree = "point_quadratic_rtree"; const char* kFieldNamePointLinearRTree = "point_linear_rtree"; @@ -2204,6 +2245,7 @@ class QueriesApi : public ReindexerApi, public QueriesVerifier { const std::string forcedSortOffsetNs = "forced_sort_offset_namespace"; const std::string nsWithObject = "namespace_with_object"; const std::string geomNs = "geom_namespace"; + const std::string uuidNs = "uuid_namespace"; const std::string btreeIdxOptNs = "btree_idx_opt_namespace"; const std::string conditionsNs = "conditions_namespace"; @@ -2221,6 +2263,7 @@ class QueriesApi : public ReindexerApi, public QueriesVerifier { static constexpr size_t forcedSortOffsetNsSize = 1000; static constexpr int forcedSortOffsetMaxValue = 1000; static constexpr size_t geomNsSize = 10000; + static constexpr size_t uuidNsSize = 10000; static constexpr int btreeIdxOptNsSize = 10000; size_t conditionsNsSize = 0; std::vector> forcedSortOffsetValues; diff --git 
a/cpp_src/gtests/tests/fixtures/queries_verifier.h b/cpp_src/gtests/tests/fixtures/queries_verifier.h index 861c05861..ed717fdca 100644 --- a/cpp_src/gtests/tests/fixtures/queries_verifier.h +++ b/cpp_src/gtests/tests/fixtures/queries_verifier.h @@ -26,6 +26,8 @@ class QueriesVerifier : public virtual ::testing::Test { std::string name; reindexer::KeyValueType type; }; + using IndexesData = std::unordered_map>; + void Verify(const reindexer::QueryResults& qr, const reindexer::Query& query, reindexer::Reindexer& rx) { std::unordered_set, PkHash> pks; std::unordered_map> distincts; @@ -40,14 +42,14 @@ class QueriesVerifier : public virtual ::testing::Test { ASSERT_TRUE(err.ok()) << err.what(); Verify(js.QueryResults(), js.JoinQuery(), rx); } - const auto& indexesFields = indexesFields_[query._namespace]; + const auto& indexesFields = indexesFields_[query.NsName()]; for (size_t i = 0; i < qr.Count(); ++i) { reindexer::Item itemr(qr[i].GetItem(false)); - auto pk = getPk(itemr, query._namespace); + auto pk = getPk(itemr, query.NsName()); EXPECT_TRUE(pks.insert(pk).second) << "Duplicated primary key: " + getPkString(pk); - InsertedItemsByPk& insertedItemsByPk = insertedItems_[query._namespace]; + InsertedItemsByPk& insertedItemsByPk = insertedItems_[query.NsName()]; auto itInsertedItem = insertedItemsByPk.find(pk); EXPECT_NE(itInsertedItem, insertedItemsByPk.end()) << "Item with such PK has not been inserted yet: " + getPkString(pk); if (itInsertedItem != insertedItemsByPk.end()) { @@ -142,14 +144,14 @@ class QueriesVerifier : public virtual ::testing::Test { // Check non found items, to not match conditions // If query has limit and offset, skip verification - if (query.start != 0 || query.count != reindexer::QueryEntry::kDefaultLimit) return; + if (query.HasOffset() || query.HasLimit()) return; // If query has distinct, skip verification for (const auto& agg : query.aggregations_) { if (agg.Type() == AggDistinct) return; } - for (auto& insertedItem : insertedItems_[query._namespace]) { + for (auto& insertedItem : insertedItems_[query.NsName()]) { if (pks.find(insertedItem.first) != pks.end()) continue; bool conditionsSatisfied = checkConditions(insertedItem.second, query.entries.cbegin(), query.entries.cend(), joinedSelectors, indexesFields); @@ -161,7 +163,7 @@ class QueriesVerifier : public virtual ::testing::Test { } auto aggResults = qr.GetAggregationResults(); - if (query.calcTotal != ModeNoTotal) { + if (query.CalcTotal() != ModeNoTotal) { // calcTotal from version 3.0.2 also return total count in aggregations, so we have remove it from here for // clean compare aggresults with aggregations aggResults.pop_back(); @@ -232,7 +234,7 @@ class QueriesVerifier : public virtual ::testing::Test { private: bool checkConditions(const reindexer::Item& item, reindexer::QueryEntries::const_iterator it, reindexer::QueryEntries::const_iterator to, const std::vector& joinedSelectors, - const std::unordered_map>& indexesFields) { + const IndexesData& indexesFields) { bool result = true; for (; it != to; ++it) { OpType op = it->operation; @@ -247,7 +249,7 @@ class QueriesVerifier : public virtual ::testing::Test { return checkConditions(item, it.cbegin(), it.cend(), joinedSelectors, indexesFields); }, [&](const reindexer::QueryEntry& qe) { - if ((op == OpOr && result) || qe.distinct) { + if ((op == OpOr && result) || qe.Distinct()) { skip = true; return false; } @@ -287,8 +289,7 @@ class QueriesVerifier : public virtual ::testing::Test { return result; } - static std::string getFieldName(const 
std::string& indexName, - const std::unordered_map>& indexesFields) { + static std::string getFieldName(const std::string& indexName, const IndexesData& indexesFields) { if (const auto it = indexesFields.find(indexName); it == indexesFields.end()) { return indexName; } else { @@ -300,15 +301,15 @@ class QueriesVerifier : public virtual ::testing::Test { static bool checkDistincts(reindexer::Item& item, const reindexer::Query& qr, std::unordered_map>& distincts, - const std::unordered_map>& indexesFields) { + const IndexesData& indexesFields) { bool result = true; // check only on root level for (auto it = qr.entries.cbegin(); it != qr.entries.cend(); ++it) { if (!it->HoldsOrReferTo()) continue; const reindexer::QueryEntry& qentry = it->Value(); - if (!qentry.distinct) continue; + if (!qentry.Distinct()) continue; - const std::string fieldName = getFieldName(qentry.index, indexesFields); + const std::string fieldName = getFieldName(qentry.FieldName(), indexesFields); reindexer::VariantArray fieldValue = item[fieldName]; EXPECT_EQ(fieldValue.size(), 1) << "Distinct field's size cannot be > 1"; if (fieldValue.empty()) return false; @@ -316,30 +317,30 @@ class QueriesVerifier : public virtual ::testing::Test { std::unordered_set& values = distincts[fieldName]; reindexer::Variant keyValue(fieldValue[0]); bool inserted = values.insert(keyValue.As()).second; - EXPECT_TRUE(inserted) << "Duplicate distinct item for index: " << keyValue.As() << ", " << qentry.idxNo; + EXPECT_TRUE(inserted) << "Duplicate distinct item for index: " << keyValue.As() << ", " << qentry.FieldName() + << " (" << qentry.IndexNo() << ')'; result &= inserted; } return result; } - bool checkCondition(const reindexer::Item& item, const JoinedSelectorMock& joinedSelector, - const std::unordered_map>& leftIndexesFields, - const std::unordered_map>& rightIndexesFields) { + bool checkCondition(const reindexer::Item& item, const JoinedSelectorMock& joinedSelector, const IndexesData& leftIndexesFields, + const IndexesData& rightIndexesFields) { for (auto it : joinedSelector.QueryResults()) { const reindexer::Item& rightItem = it.GetItem(false); bool result = true; const auto& joinEntries{joinedSelector.JoinQuery().joinEntries_}; assertrx(!joinEntries.empty()); - assertrx(joinEntries[0].op_ != OpOr); + assertrx(joinEntries[0].Operation() != OpOr); for (const auto& je : joinEntries) { - if (je.op_ == OpOr) { + if (je.Operation() == OpOr) { if (result) continue; } else if (!result) { break; } - const bool curResult = - checkOnCondition(item, rightItem, je.index_, je.joinIndex_, je.condition_, leftIndexesFields, rightIndexesFields); - switch (je.op_) { + const bool curResult = checkOnCondition(item, rightItem, je.LeftFieldName(), je.RightFieldName(), je.Condition(), + leftIndexesFields, rightIndexesFields); + switch (je.Operation()) { case OpAnd: result = curResult; break; @@ -359,9 +360,8 @@ class QueriesVerifier : public virtual ::testing::Test { } bool checkOnCondition(const reindexer::Item& leftItem, const reindexer::Item& rightItem, const std::string& leftIndexName, - const std::string& rightIndexName, CondType cond, - const std::unordered_map>& leftIndexesFields, - const std::unordered_map>& rightIndexesFields) { + const std::string& rightIndexName, CondType cond, const IndexesData& leftIndexesFields, + const IndexesData& rightIndexesFields) { const CollateOpts& collate = indexesCollates[leftIndexName]; const std::string leftFieldName = getFieldName(leftIndexName, leftIndexesFields); const std::string rightFieldName = 
getFieldName(rightIndexName, rightIndexesFields); @@ -375,21 +375,20 @@ class QueriesVerifier : public virtual ::testing::Test { return false; } - bool checkCondition(const reindexer::Item& item, const reindexer::QueryEntry& qentry, - const std::unordered_map>& indexesFields) { + bool checkCondition(const reindexer::Item& item, const reindexer::QueryEntry& qentry, const IndexesData& indexesFields) { EXPECT_GT(item.NumFields(), 0); - if (isGeomConditions(qentry.condition)) { + if (isGeomConditions(qentry.Condition())) { return checkGeomConditions(item, qentry, indexesFields); } - const CollateOpts& collate = indexesCollates[qentry.index]; + const CollateOpts& collate = indexesCollates[qentry.FieldName()]; - if (isIndexComposite(item, qentry)) { - return checkCompositeValues(item, qentry, collate, indexesFields); + if (isIndexComposite(qentry.FieldName(), indexesFields)) { + return checkCompositeCondition(item, qentry, collate, indexesFields); } else { std::string fieldName; reindexer::KeyValueType fieldType = reindexer::KeyValueType::Undefined{}; - if (const auto it = indexesFields.find(qentry.index); it == indexesFields.end()) { - fieldName = qentry.index; + if (const auto it = indexesFields.find(qentry.FieldName()); it == indexesFields.end()) { + fieldName = qentry.FieldName(); } else { EXPECT_EQ(it->second.size(), 1); assertrx(!it->second.empty()); @@ -397,13 +396,13 @@ class QueriesVerifier : public virtual ::testing::Test { fieldType = it->second[0].type; } reindexer::VariantArray fieldValues = item[fieldName]; - switch (qentry.condition) { + switch (qentry.Condition()) { case CondEmpty: return fieldValues.size() == 0; case CondAny: return fieldValues.size() > 0; case CondAllSet: - return checkAllSet(fieldValues, qentry.values, collate, fieldType); + return checkAllSet(fieldValues, qentry.Values(), collate, fieldType); case CondEq: case CondLt: case CondLe: @@ -414,7 +413,8 @@ class QueriesVerifier : public virtual ::testing::Test { case CondLike: case CondDWithin: for (const reindexer::Variant& fieldValue : fieldValues) { - if (compareValue(fieldValue, qentry.condition, qentry.values, collate, fieldType)) return true; + if (compareValue(fieldValue, qentry.Condition(), qentry.Values(), collate, fieldType)) + return true; } } } @@ -424,16 +424,15 @@ class QueriesVerifier : public virtual ::testing::Test { static bool isGeomConditions(CondType cond) noexcept { return cond == CondType::CondDWithin; } - static bool checkGeomConditions(const reindexer::Item& item, const reindexer::QueryEntry& qentry, - const std::unordered_map>& indexesFields) { - assertrx(qentry.values.size() == 2); - const reindexer::VariantArray coordinates = item[getFieldName(qentry.index, indexesFields)]; + static bool checkGeomConditions(const reindexer::Item& item, const reindexer::QueryEntry& qentry, const IndexesData& indexesFields) { + assertrx(qentry.Values().size() == 2); + const reindexer::VariantArray coordinates = item[getFieldName(qentry.FieldName(), indexesFields)]; if (coordinates.empty()) return false; assertrx(coordinates.size() == 2); const double x = coordinates[0].As(); const double y = coordinates[1].As(); - if (qentry.condition == CondDWithin) { - return DWithin(reindexer::Point{x, y}, qentry.values[0].As(), qentry.values[1].As()); + if (qentry.Condition() == CondDWithin) { + return DWithin(reindexer::Point{x, y}, qentry.Values()[0].As(), qentry.Values()[1].As()); } else { assertrx(0); abort(); @@ -448,25 +447,24 @@ class QueriesVerifier : public virtual ::testing::Test { return kvalues; } - static 
std::vector getCompositeFields(const std::string& indexName, - const std::unordered_map>& indexesFields) { + static const std::vector& getCompositeFields(const std::string& indexName, const IndexesData& indexesFields) { const auto it = indexesFields.find(indexName); assert(it != indexesFields.end()); return it->second; } - static bool checkCompositeValues(const reindexer::Item& item, const reindexer::QueryEntry& qentry, const CollateOpts& opts, - const std::unordered_map>& indexesFields) { - const auto fields = getCompositeFields(qentry.index, indexesFields); + static bool checkCompositeCondition(const reindexer::Item& item, const reindexer::QueryEntry& qentry, const CollateOpts& opts, + const IndexesData& indexesFields) { + const auto fields = getCompositeFields(qentry.FieldName(), indexesFields); const reindexer::VariantArray& indexesValues = getValues(item, fields); - const reindexer::VariantArray& keyValues = qentry.values; + const reindexer::VariantArray& keyValues = qentry.Values(); - switch (qentry.condition) { + switch (qentry.Condition()) { case CondEmpty: - return indexesValues.size() == 0; + return indexesValues.empty(); case CondAny: - return indexesValues.size() > 0; + return !indexesValues.empty(); case CondGe: assert(!keyValues.empty()); return compareCompositeValues(indexesValues, keyValues[0], opts) >= 0; @@ -490,10 +488,10 @@ class QueriesVerifier : public virtual ::testing::Test { } return false; case CondAllSet: - for (const reindexer::Variant& kv : indexesValues) { + for (const reindexer::Variant& kv : keyValues) { if (compareCompositeValues(indexesValues, kv, opts) != 0) return false; } - return !indexesValues.empty(); + return !keyValues.empty(); case CondLike: case CondDWithin: default: @@ -640,51 +638,42 @@ class QueriesVerifier : public virtual ::testing::Test { } bool checkCompositeCondition(const reindexer::Item& item, const reindexer::BetweenFieldsQueryEntry& qentry, - const std::unordered_map>& indexesFields) { - const auto firstFields = getCompositeFields(qentry.firstIndex, indexesFields); - const auto secondFields = getCompositeFields(qentry.secondIndex, indexesFields); + const IndexesData& indexesFields) { + const auto& firstFields = getCompositeFields(qentry.LeftFieldName(), indexesFields); + const auto& secondFields = getCompositeFields(qentry.RightFieldName(), indexesFields); assertrx(firstFields.size() == secondFields.size()); - reindexer::BetweenFieldsQueryEntry qe{qentry}; for (size_t i = 0; i < firstFields.size(); ++i) { - qe.firstIndex = firstFields[i].name; - qe.secondIndex = secondFields[i].name; - if (!checkCondition(item, qe, indexesFields)) return false; + if (!checkCondition(item, + reindexer::BetweenFieldsQueryEntry{std::string{firstFields[i].name}, qentry.Condition(), + std::string{secondFields[i].name}}, + indexesFields)) + return false; } return !firstFields.empty(); } - static bool isIndexComposite(const reindexer::BetweenFieldsQueryEntry& qe, - const std::unordered_map>& indexesFields) { - if (qe.firstIndex.find('+') != std::string::npos || qe.secondIndex.find('+') != std::string::npos) return true; - if (const auto it = indexesFields.find(qe.firstIndex); it != indexesFields.end() && it->second.size() > 1) return true; - if (const auto it = indexesFields.find(qe.secondIndex); it != indexesFields.end() && it->second.size() > 1) return true; - return false; + static bool isIndexComposite(const std::string& indexName, const IndexesData& indexesFields) { + const auto it = indexesFields.find(indexName); + return it != indexesFields.end() && 
it->second.size() > 1; } - static bool isIndexComposite(const reindexer::Item& item, const reindexer::QueryEntry& qentry) { - if (qentry.values.empty()) return false; - if (qentry.idxNo < 0) { - return qentry.values.size() && (qentry.values[0].Type().Is() || - qentry.values[0].Type().Is()); - } - const auto indexType = item.GetIndexType(qentry.idxNo); - return indexType.Is() || indexType.Is(); + static bool isIndexComposite(const reindexer::BetweenFieldsQueryEntry& qe, const IndexesData& indexesFields) { + return isIndexComposite(qe.LeftFieldName(), indexesFields) || isIndexComposite(qe.RightFieldName(), indexesFields); } - bool checkCondition(const reindexer::Item& item, const reindexer::BetweenFieldsQueryEntry& qentry, - const std::unordered_map>& indexesFields) { + bool checkCondition(const reindexer::Item& item, const reindexer::BetweenFieldsQueryEntry& qentry, const IndexesData& indexesFields) { EXPECT_GT(item.NumFields(), 0); assertrx(!isGeomConditions(qentry.Condition())); - const CollateOpts& collate = indexesCollates[qentry.firstIndex]; + const CollateOpts& collate = indexesCollates[qentry.LeftFieldName()]; if (isIndexComposite(qentry, indexesFields)) { return checkCompositeCondition(item, qentry, indexesFields); } - const std::string firstField = getFieldName(qentry.firstIndex, indexesFields); - const std::string secondField = getFieldName(qentry.secondIndex, indexesFields); + const std::string firstField = getFieldName(qentry.LeftFieldName(), indexesFields); + const std::string secondField = getFieldName(qentry.RightFieldName(), indexesFields); reindexer::VariantArray lValues = item[firstField]; reindexer::VariantArray rValues = item[secondField]; switch (qentry.Condition()) { @@ -811,8 +800,8 @@ class QueriesVerifier : public virtual ::testing::Test { std::vector result; result.reserve(query.joinQueries_.size()); for (auto jq : query.joinQueries_) { - jq.count = reindexer::QueryEntry::kDefaultLimit; - jq.start = reindexer::QueryEntry::kDefaultOffset; + jq.Limit(reindexer::QueryEntry::kDefaultLimit); + jq.Offset(reindexer::QueryEntry::kDefaultOffset); jq.sortingEntries_.clear(); jq.forcedSortOrder_.clear(); result.emplace_back(InnerJoin, std::move(jq)); @@ -915,5 +904,5 @@ class QueriesVerifier : public virtual ::testing::Test { } std::unordered_map> ns2pk_; - std::unordered_map>> indexesFields_; + std::unordered_map indexesFields_; }; diff --git a/cpp_src/gtests/tests/fixtures/reindexer_api.h b/cpp_src/gtests/tests/fixtures/reindexer_api.h index 0cc0c4c84..194c32c72 100644 --- a/cpp_src/gtests/tests/fixtures/reindexer_api.h +++ b/cpp_src/gtests/tests/fixtures/reindexer_api.h @@ -6,16 +6,12 @@ #include #include -#include "core/keyvalue/key_string.h" #include "core/keyvalue/variant.h" #include "core/query/query.h" #include "core/reindexer.h" #include "reindexertestapi.h" #include "servercontrol.h" #include "tools/errors.h" -#include "tools/serializer.h" -#include "tools/stringstools.h" -#include "vendor/utf8cpp/utf8.h" using reindexer::Error; using reindexer::Item; diff --git a/cpp_src/gtests/tests/fixtures/servercontrol.cc b/cpp_src/gtests/tests/fixtures/servercontrol.cc index 59cfe9e97..bcbfc80f2 100644 --- a/cpp_src/gtests/tests/fixtures/servercontrol.cc +++ b/cpp_src/gtests/tests/fixtures/servercontrol.cc @@ -124,9 +124,31 @@ void ServerControl::Interface::WriteServerConfig(const std::string& configYaml) void ServerControl::Interface::SetWALSize(int64_t size, std::string_view nsName) { setNamespaceConfigItem(nsName, "wal_size", size); } +void 
ServerControl::Interface::SetTxAlwaysCopySize(int64_t size, std::string_view nsName) { + setNamespaceConfigItem(nsName, "tx_size_to_always_copy", size); +} + void ServerControl::Interface::SetOptmizationSortWorkers(size_t cnt, std::string_view nsName) { setNamespaceConfigItem(nsName, "optimization_sort_workers", cnt); } + +void ServerControl::Interface::EnableAllProfilings() { + constexpr std::string_view kJsonCfgProfiling = R"json({ + "type":"profiling", + "profiling":{ + "queriesperfstats":true, + "queries_threshold_us":0, + "perfstats":true, + "memstats":true + } + })json"; + auto item = api.reindexer->NewItem(kConfigNs); + ASSERT_TRUE(item.Status().ok()) << item.Status().what(); + auto err = item.FromJSON(kJsonCfgProfiling); + ASSERT_TRUE(err.ok()) << err.what(); + err = api.reindexer->Upsert(kConfigNs, item); + ASSERT_TRUE(err.ok()) << err.what(); +} void ServerControl::Interface::Init() { stopped_ = false; YAML::Node y; diff --git a/cpp_src/gtests/tests/fixtures/servercontrol.h b/cpp_src/gtests/tests/fixtures/servercontrol.h index b268f0b26..86a4154ab 100644 --- a/cpp_src/gtests/tests/fixtures/servercontrol.h +++ b/cpp_src/gtests/tests/fixtures/servercontrol.h @@ -112,8 +112,9 @@ class ServerControl { void WriteServerConfig(const std::string& configYaml); // set server's WAL size void SetWALSize(int64_t size, std::string_view nsName); - + void SetTxAlwaysCopySize(int64_t size, std::string_view nsName); void SetOptmizationSortWorkers(size_t cnt, std::string_view nsName); + void EnableAllProfilings(); reindexer_server::Server srv; #ifndef _WIN32 diff --git a/cpp_src/gtests/tests/fixtures/storage_lazy_load.h b/cpp_src/gtests/tests/fixtures/storage_lazy_load.h index 4feca8291..c32ec7caf 100644 --- a/cpp_src/gtests/tests/fixtures/storage_lazy_load.h +++ b/cpp_src/gtests/tests/fixtures/storage_lazy_load.h @@ -86,7 +86,10 @@ class DISABLED_StorageLazyLoadApi : public ReindexerApi { ASSERT_TRUE(err.ok()) << err.what(); } - void dropNs() { rt.reindexer->DropNamespace(default_namespace); } + void dropNs() { + const auto err = rt.reindexer->DropNamespace(default_namespace); + ASSERT_TRUE(err.ok()) << err.what(); + } int64_t getItemsCount(bool& storageLoaded) { QueryResults qr; diff --git a/cpp_src/gtests/tests/fixtures/systemhelpers.cc b/cpp_src/gtests/tests/fixtures/systemhelpers.cc index dbe3d3ebd..fae4110cb 100644 --- a/cpp_src/gtests/tests/fixtures/systemhelpers.cc +++ b/cpp_src/gtests/tests/fixtures/systemhelpers.cc @@ -34,7 +34,7 @@ pid_t StartProcess(const std::string& program, const std::vector& p if (isMainThread) { // prctl sends signal on thread termination, so this call may lead to unexpected process termination int r = prctl(PR_SET_PDEATHSIG, SIGTERM); if (r == -1) { - perror("prctl error\n"); + perror("prctl error"); exit(1); } } @@ -44,7 +44,7 @@ pid_t StartProcess(const std::string& program, const std::vector& p } int ret = execv(program.c_str(), ¶msPointers[0]); if (ret) { - perror("exec error\n"); + perror("exec error"); exit(1); } } diff --git a/cpp_src/gtests/tests/fixtures/ttl_index_api.h b/cpp_src/gtests/tests/fixtures/ttl_index_api.h index 9e6768fef..5026ed6e0 100644 --- a/cpp_src/gtests/tests/fixtures/ttl_index_api.h +++ b/cpp_src/gtests/tests/fixtures/ttl_index_api.h @@ -11,7 +11,8 @@ class TtlIndexApi : public ReindexerApi { DefineNamespaceDataset(default_namespace, {IndexDeclaration{kFieldId, "hash", "int", IndexOpts().PK(), 0}, IndexDeclaration{kFieldData, "tree", "int", IndexOpts().Array(), 0}, IndexDeclaration{kFieldData, "tree", "int", IndexOpts().Array(), 0}}); - 
rt.reindexer->AddIndex(default_namespace, reindexer::IndexDef(kFieldDate, {kFieldDate}, "ttl", "int64", IndexOpts(), 1)); + err = rt.reindexer->AddIndex(default_namespace, reindexer::IndexDef(kFieldDate, {kFieldDate}, "ttl", "int64", IndexOpts(), 1)); + ASSERT_TRUE(err.ok()) << err.what(); AddDataToNs(3000); } diff --git a/cpp_src/gtests/tests/fuzzing/fuzzing.cc b/cpp_src/gtests/tests/fuzzing/fuzzing.cc index 5491b6c69..fb71502d8 100644 --- a/cpp_src/gtests/tests/fuzzing/fuzzing.cc +++ b/cpp_src/gtests/tests/fuzzing/fuzzing.cc @@ -1,4 +1,6 @@ #include "fuzzing/fuzzing.h" +#include "args/args.hpp" +#include "fuzzing/index.h" #include "fuzzing/ns.h" #include "fuzzing/query_generator.h" @@ -6,23 +8,25 @@ TEST_F(Fuzzing, BaseTest) { try { const fuzzing::RandomGenerator::ErrFactorType errorFactor{0, 1}; reindexer::WrSerializer ser; - std::unordered_set generatedNames; - fuzzing::RandomGenerator rnd(std::cout, errorFactor); - std::vector namespaces_; + fuzzing::RandomGenerator rnd(errorFactor); + std::vector namespaces; const size_t N = 1; for (size_t i = 0; i < N; ++i) { - namespaces_.emplace_back(rnd.NsName(generatedNames), std::cout, errorFactor); - fuzzing::Ns& ns = namespaces_.back(); + namespaces.emplace_back(rnd.GenerateNsName(), errorFactor); + fuzzing::Ns& ns = namespaces.back(); auto err = rx_.OpenNamespace(ns.GetName()); EXPECT_TRUE(err.ok()) << err.what(); - if (!err.ok()) continue; + if (!err.ok()) { + continue; + } auto& indexes = ns.GetIndexes(); - for (size_t i = 0; i < indexes.size();) { - const auto idxDef = indexes[i].IndexDef(ns.GetRandomGenerator(), ns.GetScheme()); + for (size_t j = 0; j < indexes.size();) { + const fuzzing::Index& idx = indexes[j]; + const auto idxDef = idx.IndexDef(ns.GetRandomGenerator(), ns.GetScheme(), indexes); err = rx_.AddIndex(ns.GetName(), idxDef); EXPECT_TRUE(err.ok()) << err.what(); if (err.ok()) { - ns.AddIndex(indexes[i], !idxDef.opts_.IsDense() && idxDef.opts_.IsSparse() && !idxDef.opts_.IsPK()); + ns.AddIndexToScheme(idx, j); // TODO move to fuzzing::Ns std::vector fields; std::visit(reindexer::overloaded{ [&](const fuzzing::Index::Child& c) { @@ -35,23 +39,22 @@ TEST_F(Fuzzing, BaseTest) { ToKeyValueType(ns.GetScheme().GetFieldType(child.fieldPath))}); } }}, - indexes[i].content); - if (indexes[i].isPk) { + idx.Content()); + if (idx.IsPk()) { setPkFields(ns.GetName(), fields); } - - addIndexFields(ns.GetName(), indexes[i].name, std::move(fields)); - ++i; + addIndexFields(ns.GetName(), idx.Name(), std::move(fields)); + ++j; } else { - indexes.erase(indexes.begin() + i); + indexes.erase(indexes.begin() + j); } } - for (size_t i = 0, s = ns.GetRandomGenerator().RndItemsCount(); i < s; ++i) { + for (size_t j = 0, s = ns.GetRandomGenerator().RndItemsCount(); j < s; ++j) { auto item = rx_.NewItem(ns.GetName()); err = item.Status(); EXPECT_TRUE(err.ok()) << err.what(); if (!err.ok()) continue; - ns.NewItem(ser); + ns.NewItem(ser); // TODO not json err = item.FromJSON(ser.Slice()); EXPECT_TRUE(err.ok()) << err.what() << std::endl << "size: " << ser.Slice().size() << std::endl << ser.Slice(); if (!err.ok()) continue; @@ -61,26 +64,26 @@ TEST_F(Fuzzing, BaseTest) { enum Op : uint8_t { Insert, Upsert, Update, Delete, END = Delete }; switch (rnd.RndWhich()) { case Insert: - err = rx_.Insert(rnd.NsName(ns.GetName(), generatedNames), item); + err = rx_.Insert(rnd.NsName(ns.GetName()), item); if (err.ok() && item.GetID() != -1) { saveItem(std::move(item), ns.GetName()); } break; case Upsert: - err = rx_.Upsert(rnd.NsName(ns.GetName(), generatedNames), 
item); + err = rx_.Upsert(rnd.NsName(ns.GetName()), item); if (err.ok()) { saveItem(std::move(item), ns.GetName()); } break; case Update: - err = rx_.Update(rnd.NsName(ns.GetName(), generatedNames), item); + err = rx_.Update(rnd.NsName(ns.GetName()), item); if (err.ok() && item.GetID() != -1) { saveItem(std::move(item), ns.GetName()); } break; case Delete: { const auto id = item.GetID(); - err = rx_.Delete(rnd.NsName(ns.GetName(), generatedNames), item); + err = rx_.Delete(rnd.NsName(ns.GetName()), item); if (err.ok() && item.GetID() != id) { deleteItem(item, ns.GetName()); } @@ -94,7 +97,7 @@ TEST_F(Fuzzing, BaseTest) { err = rx_.Select(reindexer::Query(ns.GetName()).ReqTotal(), qr); EXPECT_TRUE(err.ok()) << err.what(); } - fuzzing::QueryGenerator queryGenerator{namespaces_, std::cout, errorFactor}; + fuzzing::QueryGenerator queryGenerator{namespaces, errorFactor}; for (size_t i = 0; i < 100; ++i) { auto query = queryGenerator(); reindexer::QueryResults qr; @@ -113,5 +116,40 @@ TEST_F(Fuzzing, BaseTest) { int main(int argc, char** argv) { ::testing::InitGoogleTest(&argc, argv); + + args::ArgumentParser parser("Reindexer fuzzing tests"); + args::HelpFlag help(parser, "help", "show this message", {'h', "help"}); + args::Group progOptions("options"); + args::ValueFlag dbDsn(progOptions, "DSN", + "DSN to 'reindexer'. Can be 'cproto://:/' or 'builtin://'", {'d', "dsn"}, + args::Options::Single | args::Options::Global); + args::ValueFlag output(progOptions, "FILENAME", "A file for saving initial states of random engines", {'s', "save"}, + args::Options::Single | args::Options::Global); + args::ValueFlag input(progOptions, "FILENAME", "A file for initial states of random engines recovery", {'r', "repeat"}, + args::Options::Single | args::Options::Global); + args::GlobalOptions globals(parser, progOptions); + try { + parser.ParseCLI(argc, argv); + } catch (const args::Help&) { + std::cout << parser.Help() << std::endl; + return 1; + } catch (const args::Error& e) { + std::cerr << "ERROR: " << e.what() << std::endl; + std::cout << parser.Help() << std::endl; + return 1; + } + std::string out = args::get(output); + if (!out.empty()) { + fuzzing::RandomGenerator::SetOut(std::move(out)); + } + const std::string in = args::get(input); + if (!in.empty()) { + fuzzing::RandomGenerator::SetIn(in); + } + std::string dsn = args::get(dbDsn); + if (!dsn.empty()) { + Fuzzing::SetDsn(std::move(dsn)); + } + return RUN_ALL_TESTS(); } diff --git a/cpp_src/gtests/tests/mocks/rpcserver_fake.cc b/cpp_src/gtests/tests/mocks/rpcserver_fake.cc index e7611f3f6..abb9d3511 100644 --- a/cpp_src/gtests/tests/mocks/rpcserver_fake.cc +++ b/cpp_src/gtests/tests/mocks/rpcserver_fake.cc @@ -124,8 +124,9 @@ bool RPCServerFake::Start(const std::string &addr, ev::dynamic_loop &loop, Error dispatcher_.Middleware(this, &RPCServerFake::CheckAuth); - listener_.reset(new Listener(loop, cproto::ServerConnection::NewFactory(dispatcher_, false, 1024 * 1024 * 1024))); - return listener_->Bind(addr); + listener_ = + std::make_unique>(loop, cproto::ServerConnection::NewFactory(dispatcher_, false, 1024 * 1024 * 1024)); + return listener_->Bind(addr, socket_domain::tcp); } RPCServerStatus RPCServerFake::Status() const { return state_; } diff --git a/cpp_src/gtests/tests/mocks/rpcserver_fake.h b/cpp_src/gtests/tests/mocks/rpcserver_fake.h index a039e1509..e99ee9585 100644 --- a/cpp_src/gtests/tests/mocks/rpcserver_fake.h +++ b/cpp_src/gtests/tests/mocks/rpcserver_fake.h @@ -19,7 +19,7 @@ struct RPCServerConfig { enum RPCServerStatus { Init, 
Connected, Stopped }; -struct RPCClientData : public cproto::ClientData { +struct RPCClientData final : public cproto::ClientData { AuthContext auth; int connID; }; diff --git a/cpp_src/gtests/tests/unit/composite_indexes_api.h b/cpp_src/gtests/tests/unit/composite_indexes_api.h index a555bb162..e78f25c55 100644 --- a/cpp_src/gtests/tests/unit/composite_indexes_api.h +++ b/cpp_src/gtests/tests/unit/composite_indexes_api.h @@ -91,16 +91,24 @@ class CompositeIndexesApi : public ReindexerApi { QueryResults qr; auto err = rt.reindexer->Select(query, qr); EXPECT_TRUE(err.ok()) << err.what(); + assert(err.ok()); QueryResults qrSql; auto sqlQuery = query.GetSQL(); err = rt.reindexer->Select(query.GetSQL(), qrSql); EXPECT_TRUE(err.ok()) << err.what(); + assert(err.ok()); EXPECT_EQ(qr.Count(), qrSql.Count()) << "SQL: " << sqlQuery; for (auto it = qr.begin(), itSql = qrSql.begin(); it != qr.end() && itSql != qrSql.end(); ++it, ++itSql) { + EXPECT_TRUE(it.Status().ok()) << it.Status().what(); + assert(it.Status().ok()); reindexer::WrSerializer ser, serSql; - it.GetCJSON(ser); - itSql.GetCJSON(serSql); + err = it.GetCJSON(ser); + EXPECT_TRUE(err.ok()) << err.what(); + assert(err.ok()); + err = itSql.GetCJSON(serSql); + EXPECT_TRUE(err.ok()) << err.what(); + assert(err.ok()); EXPECT_EQ(ser.Slice(), serSql.Slice()) << "SQL: " << sqlQuery; } return qr; diff --git a/cpp_src/gtests/tests/unit/composite_indexes_test.cc b/cpp_src/gtests/tests/unit/composite_indexes_test.cc index b6ab2b70c..c5579d1b5 100644 --- a/cpp_src/gtests/tests/unit/composite_indexes_test.cc +++ b/cpp_src/gtests/tests/unit/composite_indexes_test.cc @@ -46,11 +46,13 @@ TEST_F(CompositeIndexesApi, AddIndexWithExistingCompositeIndex) { static void selectAll(reindexer::Reindexer* reindexer, const std::string& ns) { QueryResults qr; Error err = reindexer->Select(Query(ns, 0, 1000, ModeAccurateTotal), qr); - EXPECT_TRUE(err.ok()) << err.what(); + ASSERT_TRUE(err.ok()) << err.what(); for (auto it : qr) { + ASSERT_TRUE(it.Status().ok()) << it.Status().what(); reindexer::WrSerializer wrser; - it.GetJSON(wrser, false); + err = it.GetJSON(wrser, false); + ASSERT_TRUE(err.ok()) << err.what(); } } @@ -64,7 +66,7 @@ TEST_F(CompositeIndexesApi, DropTest2) { for (int i = 0; i < 1000; ++i) { Item item = NewItem(test_ns); - EXPECT_TRUE(!!item); + EXPECT_FALSE(!item); EXPECT_TRUE(item.Status().ok()) << item.Status().what(); item["id"] = i + 1; @@ -118,7 +120,7 @@ TEST_F(CompositeIndexesApi, CompositeIndexesSelectTest) { auto qr = execAndCompareQuery( Query(default_namespace).WhereComposite(compositeIndexName.c_str(), CondEq, {{Variant(priceValue), Variant(pagesValue)}})); - EXPECT_TRUE(qr.Count() == 1); + ASSERT_EQ(qr.Count(), 1); Item pricePageRow = qr.begin().GetItem(false); Variant selectedPrice = pricePageRow[kFieldNamePrice]; @@ -129,8 +131,8 @@ TEST_F(CompositeIndexesApi, CompositeIndexesSelectTest) { Item titleNameRow = qr.begin().GetItem(false); Variant selectedTitle = titleNameRow[kFieldNameTitle]; Variant selectedName = titleNameRow[kFieldNameName]; - EXPECT_TRUE(static_cast(selectedTitle)->compare(std::string(titleValue)) == 0); - EXPECT_TRUE(static_cast(selectedName)->compare(std::string(nameValue)) == 0); + EXPECT_EQ(static_cast(selectedTitle)->compare(std::string(titleValue)), 0); + EXPECT_EQ(static_cast(selectedName)->compare(std::string(nameValue)), 0); execAndCompareQuery(Query(default_namespace).WhereComposite(compositeIndexName, CondLt, {{Variant(priceValue), Variant(pagesValue)}})); 
execAndCompareQuery(Query(default_namespace).WhereComposite(compositeIndexName, CondLe, {{Variant(priceValue), Variant(pagesValue)}})); diff --git a/cpp_src/gtests/tests/unit/dsl_parser_test.cc b/cpp_src/gtests/tests/unit/dsl_parser_test.cc index 0daee4f86..2fb4bb4c7 100644 --- a/cpp_src/gtests/tests/unit/dsl_parser_test.cc +++ b/cpp_src/gtests/tests/unit/dsl_parser_test.cc @@ -1,17 +1,20 @@ #include "join_selects_api.h" +static void checkQueryDslParse(const reindexer::Query& q) { + const std::string dsl = q.GetJSON(); + Query parsedQuery; + Error err = parsedQuery.FromJSON(dsl); + ASSERT_TRUE(err.ok()) << err.what() << "\nDSL:\n" << dsl; + ASSERT_EQ(q, parsedQuery) << "DSL:\n" << dsl << "\nOriginal query:\n" << q.GetSQL() << "\nParsed query:\n" << parsedQuery.GetSQL(); +} + TEST_F(JoinSelectsApi, JoinsDSLTest) { Query queryGenres(genres_namespace); Query queryAuthors(authors_namespace); Query queryBooks{Query(books_namespace, 0, 10).Where(price, CondGe, 500)}; queryBooks.OrInnerJoin(genreId_fk, genreid, CondEq, std::move(queryGenres)); queryBooks.LeftJoin(authorid_fk, authorid, CondEq, std::move(queryAuthors)); - - std::string dsl = queryBooks.GetJSON(); - Query testLoadDslQuery; - Error err = testLoadDslQuery.FromJSON(dsl); - ASSERT_TRUE(err.ok()) << err.what(); - ASSERT_TRUE(queryBooks == testLoadDslQuery); + checkQueryDslParse(queryBooks); } TEST_F(JoinSelectsApi, EqualPositionDSLTest) { @@ -22,12 +25,7 @@ TEST_F(JoinSelectsApi, EqualPositionDSLTest) { query.OpenBracket().Where("f4", CondEq, 4).Where("f5", CondLt, 10); query.AddEqualPosition({"f4", "f5"}); query.CloseBracket(); - - std::string dsl = query.GetJSON(); - Query testLoadDslQuery; - Error err = testLoadDslQuery.FromJSON(dsl); - ASSERT_TRUE(err.ok()) << err.what(); - ASSERT_TRUE(query == testLoadDslQuery); + checkQueryDslParse(query); } TEST_F(JoinSelectsApi, MergedQueriesDSLTest) { @@ -37,28 +35,15 @@ TEST_F(JoinSelectsApi, MergedQueriesDSLTest) { mainBooksQuery.mergeQueries_.emplace_back(Merge, std::move(firstMergedQuery)); mainBooksQuery.mergeQueries_.emplace_back(Merge, std::move(secondMergedQuery)); - - std::string dsl = mainBooksQuery.GetJSON(); - Query testLoadDslQuery; - Error err = testLoadDslQuery.FromJSON(dsl); - ASSERT_TRUE(err.ok()) << err.what(); - ASSERT_TRUE(mainBooksQuery == testLoadDslQuery); + checkQueryDslParse(mainBooksQuery); } TEST_F(JoinSelectsApi, AggregateFunctonsDSLTest) { Query query{Query(books_namespace, 10, 100).Where(pages, CondGe, 150)}; - query.aggregations_.push_back({AggAvg, {price}}); - query.aggregations_.push_back({AggSum, {pages}}); - query.aggregations_.push_back({AggFacet, {title, pages}, {{{title, true}}}, 100, 10}); - - std::string dsl = query.GetJSON(); - Query testLoadDslQuery; - Error err = testLoadDslQuery.FromJSON(dsl); - ASSERT_TRUE(err.ok()) << err.what(); - ASSERT_TRUE(query == testLoadDslQuery); + checkQueryDslParse(query); } TEST_F(JoinSelectsApi, SelectFilterDSLTest) { @@ -66,12 +51,7 @@ TEST_F(JoinSelectsApi, SelectFilterDSLTest) { query.selectFilter_.push_back(price); query.selectFilter_.push_back(pages); query.selectFilter_.push_back(title); - - std::string dsl = query.GetJSON(); - Query testLoadDslQuery; - Error err = testLoadDslQuery.FromJSON(dsl); - ASSERT_TRUE(err.ok()) << err.what(); - ASSERT_TRUE(query == testLoadDslQuery); + checkQueryDslParse(query); } TEST_F(JoinSelectsApi, SelectFilterInJoinDSLTest) { @@ -85,35 +65,18 @@ TEST_F(JoinSelectsApi, SelectFilterInJoinDSLTest) { queryBooks.LeftJoin(authorid_fk, authorid, CondEq, std::move(queryAuthors)); } - 
std::string dsl = queryBooks.GetJSON(); - Query testLoadDslQuery; - Error err = testLoadDslQuery.FromJSON(dsl); - ASSERT_TRUE(err.ok()) << err.what(); - ASSERT_EQ(queryBooks, testLoadDslQuery); + checkQueryDslParse(queryBooks); } TEST_F(JoinSelectsApi, ReqTotalDSLTest) { Query query{Query(books_namespace, 10, 100, ModeNoTotal).Where(pages, CondGe, 150)}; - - std::string dsl1 = query.GetJSON(); - Query testLoadDslQuery1; - Error err = testLoadDslQuery1.FromJSON(dsl1); - ASSERT_TRUE(err.ok()) << err.what(); - ASSERT_TRUE(query == testLoadDslQuery1); + checkQueryDslParse(query); query.CachedTotal(); - std::string dsl2 = query.GetJSON(); - Query testLoadDslQuery2; - err = testLoadDslQuery2.FromJSON(dsl2); - ASSERT_TRUE(err.ok()) << err.what(); - ASSERT_TRUE(query == testLoadDslQuery2); + checkQueryDslParse(query); query.ReqTotal(); - std::string dsl3 = query.GetJSON(); - Query testLoadDslQuery3; - err = testLoadDslQuery3.FromJSON(dsl3); - ASSERT_TRUE(err.ok()) << err.what(); - ASSERT_EQ(query, testLoadDslQuery3); + checkQueryDslParse(query); } TEST_F(JoinSelectsApi, SelectFunctionsDSLTest) { @@ -121,22 +84,13 @@ TEST_F(JoinSelectsApi, SelectFunctionsDSLTest) { query.AddFunction("f1()"); query.AddFunction("f2()"); query.AddFunction("f3()"); - - std::string dsl = query.GetJSON(); - Query testLoadDslQuery; - Error err = testLoadDslQuery.FromJSON(dsl); - ASSERT_TRUE(err.ok()) << err.what(); - ASSERT_TRUE(query == testLoadDslQuery); + checkQueryDslParse(query); } TEST_F(JoinSelectsApi, CompositeValuesDSLTest) { std::string pagesBookidIndex = pages + std::string("+") + bookid; Query query{Query(books_namespace).WhereComposite(pagesBookidIndex.c_str(), CondGe, {{Variant(500), Variant(10)}})}; - std::string dsl = query.GetJSON(); - Query testLoadDslQuery; - Error err = testLoadDslQuery.FromJSON(dsl); - ASSERT_TRUE(err.ok()) << err.what(); - ASSERT_TRUE(query == testLoadDslQuery); + checkQueryDslParse(query); } TEST_F(JoinSelectsApi, GeneralDSLTest) { @@ -153,12 +107,7 @@ TEST_F(JoinSelectsApi, GeneralDSLTest) { testDslQuery.selectFilter_.push_back(authorid_fk); testDslQuery.AddFunction("f1()"); testDslQuery.AddFunction("f2()"); - testDslQuery.aggregations_.push_back({AggDistinct, {bookid}}); - Query testLoadDslQuery; - const std::string dsl1 = testDslQuery.GetJSON(); - Error err = testLoadDslQuery.FromJSON(dsl1); - EXPECT_TRUE(err.ok()) << err.what(); - EXPECT_TRUE(testDslQuery == testLoadDslQuery); + checkQueryDslParse(testDslQuery); } diff --git a/cpp_src/gtests/tests/unit/ft/ft_generic.cc b/cpp_src/gtests/tests/unit/ft/ft_generic.cc index ac58f97de..e2def8c9e 100644 --- a/cpp_src/gtests/tests/unit/ft/ft_generic.cc +++ b/cpp_src/gtests/tests/unit/ft/ft_generic.cc @@ -534,7 +534,8 @@ TEST_P(FTGenericApi, DeleteTest) { // Delete(data[1].first); // Delete(data[1].first); - Delete(data.find("In law, a legal entity is an entity that is capable of bearing legal rights")->second); + const auto err = Delete(data.find("In law, a legal entity is an entity that is capable of bearing legal rights")->second); + ASSERT_TRUE(err.ok()) << err.what(); res = SimpleSelect("entity"); // for (auto it : res) { @@ -572,7 +573,8 @@ TEST_P(FTGenericApi, RebuildAfterDeletion) { auto res = selectF("entity"); ASSERT_EQ(res.Count(), 3); - Delete(data.find("In law, a legal entity is an entity that is capable of bearing legal rights")->second); + err = Delete(data.find("In law, a legal entity is an entity that is capable of bearing legal rights")->second); + ASSERT_TRUE(err.ok()) << err.what(); res = selectF("entity"); 
ASSERT_EQ(res.Count(), 2); } diff --git a/cpp_src/gtests/tests/unit/ft/ft_stress.cc b/cpp_src/gtests/tests/unit/ft/ft_stress.cc index f25e5429c..e0a0d35e7 100644 --- a/cpp_src/gtests/tests/unit/ft/ft_stress.cc +++ b/cpp_src/gtests/tests/unit/ft/ft_stress.cc @@ -10,7 +10,9 @@ class FTStressApi : public FTApi { }; TEST_P(FTStressApi, BasicStress) { - Init(GetDefaultConfig()); + const std::string kStorage = reindexer::fs::JoinPath(reindexer::fs::GetTempDir(), "reindex_FTApi/BasicStress"); + reindexer::fs::RmDirAll(kStorage); + Init(GetDefaultConfig(), NS1, kStorage); std::vector data; std::vector phrase; @@ -29,7 +31,8 @@ TEST_P(FTStressApi, BasicStress) { std::thread statsThread([&] { while (!terminate) { reindexer::QueryResults qr; - rt.reindexer->Select(reindexer::Query("#memstats"), qr); + const auto err = rt.reindexer->Select(reindexer::Query("#memstats"), qr); + ASSERT_TRUE(err.ok()) << err.what(); std::this_thread::sleep_for(std::chrono::milliseconds(10)); } }); @@ -89,7 +92,8 @@ TEST_P(FTStressApi, ConcurrencyCheck) { lck.unlock(); while (!terminate) { reindexer::QueryResults qr; - rt.reindexer->Select(reindexer::Query("#memstats"), qr); + const auto err = rt.reindexer->Select(reindexer::Query("#memstats"), qr); + ASSERT_TRUE(err.ok()) << err.what(); } }); } else { diff --git a/cpp_src/gtests/tests/unit/join_test.cc b/cpp_src/gtests/tests/unit/join_test.cc index 35f4f46f1..17aee3df6 100644 --- a/cpp_src/gtests/tests/unit/join_test.cc +++ b/cpp_src/gtests/tests/unit/join_test.cc @@ -4,6 +4,7 @@ #include #include "core/itemimpl.h" #include "core/nsselecter/joinedselector.h" +#include "core/type_consts_helpers.h" #include "join_on_conditions_api.h" #include "join_selects_api.h" #include "test_helpers.h" @@ -33,8 +34,8 @@ TEST_F(JoinSelectsApi, JoinsAsWhereConditionsTest) { QueryWatcher watcher{queryBooks}; reindexer::QueryResults qr; Error err = rt.reindexer->Select(queryBooks, qr); - EXPECT_TRUE(err.ok()) << err.what(); - EXPECT_TRUE(qr.Count() <= 50); + ASSERT_TRUE(err.ok()) << err.what(); + EXPECT_LE(qr.Count(), 50); CheckJoinsInComplexWhereCondition(qr); } @@ -47,7 +48,7 @@ TEST_F(JoinSelectsApi, JoinsLockWithCache_364) { for (int i = 0; i < 10; ++i) { reindexer::QueryResults qr; Error err = rt.reindexer->Select(queryBooks, qr); - EXPECT_TRUE(err.ok()) << err.what(); + ASSERT_TRUE(err.ok()) << err.what(); } } @@ -67,8 +68,8 @@ TEST_F(JoinSelectsApi, JoinsAsWhereConditionsTest2) { QueryWatcher watcher{query}; reindexer::QueryResults qr; Error err = rt.reindexer->Select(query, qr); - EXPECT_TRUE(err.ok()) << err.what(); - EXPECT_TRUE(qr.Count() <= 50); + ASSERT_TRUE(err.ok()) << err.what(); + EXPECT_LE(qr.Count(), 50); CheckJoinsInComplexWhereCondition(qr); } @@ -91,14 +92,16 @@ TEST_F(JoinSelectsApi, SqlPasringTest) { Query dstQuery; dstQuery.FromSQL(wrser.Slice()); - ASSERT_TRUE(srcQuery == dstQuery); + ASSERT_EQ(srcQuery, dstQuery); wrser.Reset(); srcQuery.Serialize(wrser); Query deserializedQuery; reindexer::Serializer ser(wrser.Buf(), wrser.Len()); deserializedQuery.Deserialize(ser); - ASSERT_TRUE(srcQuery == deserializedQuery); + ASSERT_EQ(srcQuery, deserializedQuery) << "Original query:\n" + << srcQuery.GetSQL() << "\nDeserialized query:\n" + << deserializedQuery.GetSQL(); } TEST_F(JoinSelectsApi, InnerJoinTest) { @@ -109,14 +112,14 @@ TEST_F(JoinSelectsApi, InnerJoinTest) { reindexer::QueryResults joinQueryRes; Error err = rt.reindexer->Select(joinQuery, joinQueryRes); - EXPECT_TRUE(err.ok()) << err.what(); + ASSERT_TRUE(err.ok()) << err.what(); err = 
VerifyResJSON(joinQueryRes); - EXPECT_TRUE(err.ok()) << err.what(); + ASSERT_TRUE(err.ok()) << err.what(); reindexer::QueryResults pureSelectRes; err = rt.reindexer->Select(queryBooks, pureSelectRes); - EXPECT_TRUE(err.ok()) << err.what(); + ASSERT_TRUE(err.ok()) << err.what(); QueryResultRows joinSelectRows; QueryResultRows pureSelectRows; @@ -129,7 +132,7 @@ TEST_F(JoinSelectsApi, InnerJoinTest) { reindexer::QueryResults authorsSelectRes; Query authorsQuery{Query(authors_namespace).Where(authorid, CondEq, authorIdKeyRef)}; err = rt.reindexer->Select(authorsQuery, authorsSelectRes); - EXPECT_TRUE(err.ok()) << err.what(); + ASSERT_TRUE(err.ok()) << err.what(); if (err.ok()) { int bookId = booksItem[bookid].Get(); @@ -152,7 +155,7 @@ TEST_F(JoinSelectsApi, LeftJoinTest) { Query booksQuery{Query(books_namespace).Where(price, CondGe, 500)}; reindexer::QueryResults booksQueryRes; Error err = rt.reindexer->Select(booksQuery, booksQueryRes); - EXPECT_TRUE(err.ok()) << err.what(); + ASSERT_TRUE(err.ok()) << err.what(); QueryResultRows pureSelectRows; if (err.ok()) { @@ -169,10 +172,10 @@ TEST_F(JoinSelectsApi, LeftJoinTest) { QueryWatcher watcher{joinQuery}; reindexer::QueryResults joinQueryRes; err = rt.reindexer->Select(joinQuery, joinQueryRes); - EXPECT_TRUE(err.ok()) << err.what(); + ASSERT_TRUE(err.ok()) << err.what(); err = VerifyResJSON(joinQueryRes); - EXPECT_TRUE(err.ok()) << err.what(); + ASSERT_TRUE(err.ok()) << err.what(); if (err.ok()) { std::unordered_set presentedAuthorIds; @@ -188,7 +191,7 @@ TEST_F(JoinSelectsApi, LeftJoinTest) { for (auto joinedFieldIt = itemIt.begin(); joinedFieldIt != itemIt.end(); ++joinedFieldIt) { reindexer::ItemImpl item2(joinedFieldIt.GetItem(0, joinQueryRes.getPayloadType(1), joinQueryRes.getTagsMatcher(1))); Variant authorIdKeyRef2 = item2.GetField(joinQueryRes.getPayloadType(1).FieldByName(authorid_fk)); - EXPECT_TRUE(authorIdKeyRef1 == authorIdKeyRef2); + EXPECT_EQ(authorIdKeyRef1, authorIdKeyRef2); } presentedAuthorIds.insert(static_cast(authorIdKeyRef1)); @@ -208,15 +211,15 @@ TEST_F(JoinSelectsApi, LeftJoinTest) { int authorId = static_cast(authorIdKeyRef1); auto itAutorid(presentedAuthorIds.find(authorId)); - EXPECT_TRUE(itAutorid != presentedAuthorIds.end()); + EXPECT_NE(itAutorid, presentedAuthorIds.end()); auto itRowidIndex(rowidsIndexes.find(rowid)); - EXPECT_TRUE(itRowidIndex != rowidsIndexes.end()); + EXPECT_NE(itRowidIndex, rowidsIndexes.end()); if (itRowidIndex != rowidsIndexes.end()) { Item item2((joinQueryRes.begin() + rowid).GetItem(false)); Variant authorIdKeyRef2 = item2[authorid]; - EXPECT_TRUE(authorIdKeyRef1 == authorIdKeyRef2); + EXPECT_EQ(authorIdKeyRef1, authorIdKeyRef2); } } } @@ -236,10 +239,10 @@ TEST_F(JoinSelectsApi, OrInnerJoinTest) { reindexer::QueryResults queryRes; Error err = rt.reindexer->Select(orInnerJoinQuery, queryRes); - EXPECT_TRUE(err.ok()) << err.what(); + ASSERT_TRUE(err.ok()) << err.what(); err = VerifyResJSON(queryRes); - EXPECT_TRUE(err.ok()) << err.what(); + ASSERT_TRUE(err.ok()) << err.what(); if (err.ok()) { for (auto rowIt : queryRes) { @@ -251,7 +254,7 @@ TEST_F(JoinSelectsApi, OrInnerJoinTest) { for (int i = 0; i < authorIdIt.ItemsCount(); ++i) { reindexer::ItemImpl authorsItem(authorIdIt.GetItem(i, queryRes.getPayloadType(1), queryRes.getTagsMatcher(1))); Variant authorIdKeyRef2 = authorsItem.GetField(queryRes.getPayloadType(1).FieldByName(authorid)); - EXPECT_TRUE(authorIdKeyRef1 == authorIdKeyRef2); + EXPECT_EQ(authorIdKeyRef1, authorIdKeyRef2); } reindexer::joins::JoinedFieldIterator genreIdIt = 
itemIt.at(genresNsJoinIndex); @@ -259,7 +262,7 @@ TEST_F(JoinSelectsApi, OrInnerJoinTest) { for (int i = 0; i < genreIdIt.ItemsCount(); ++i) { reindexer::ItemImpl genresItem = genreIdIt.GetItem(i, queryRes.getPayloadType(2), queryRes.getTagsMatcher(2)); Variant genresIdKeyRef2 = genresItem.GetField(queryRes.getPayloadType(2).FieldByName(genreid)); - EXPECT_TRUE(genresIdKeyRef1 == genresIdKeyRef2); + EXPECT_EQ(genresIdKeyRef1, genresIdKeyRef2); } } } @@ -292,7 +295,7 @@ TEST_F(JoinSelectsApi, JoinTestSorting) { for (auto rowIt : joinQueryRes) { Item item = rowIt.GetItem(false); if (!prevField.Type().Is()) { - ASSERT_TRUE(prevField.Compare(item[age]) <= 0); + ASSERT_LE(prevField.Compare(item[age]), 0); } Variant key = item[authorid]; @@ -304,14 +307,14 @@ TEST_F(JoinSelectsApi, JoinTestSorting) { for (int i = 0; i < joinedFieldIt.ItemsCount(); ++i) { reindexer::ItemImpl joinItem(joinedFieldIt.GetItem(i, joinQueryRes.getPayloadType(1), joinQueryRes.getTagsMatcher(1))); Variant fkey = joinItem.GetField(joinQueryRes.getPayloadType(1).FieldByName(authorid_fk)); - ASSERT_TRUE(key.Compare(fkey) == 0) << key.As() << " " << fkey.As(); + ASSERT_EQ(key.Compare(fkey), 0) << key.As() << " " << fkey.As(); Variant recentJoinedValue = joinItem.GetField(joinQueryRes.getPayloadType(1).FieldByName(price)); - ASSERT_TRUE(recentJoinedValue.As() >= 200); + ASSERT_GE(recentJoinedValue.As(), 200); if (!prevJoinedValue.Type().Is()) { - ASSERT_TRUE(prevJoinedValue.Compare(recentJoinedValue) >= 0); + ASSERT_GE(prevJoinedValue.Compare(recentJoinedValue), 0); } Variant pagesValue = joinItem.GetField(joinQueryRes.getPayloadType(1).FieldByName(pages)); - ASSERT_TRUE(pagesValue.As() >= 100); + ASSERT_GE(pagesValue.As(), 100); prevJoinedValue = recentJoinedValue; } prevField = item[age]; @@ -350,7 +353,7 @@ TEST_F(JoinSelectsApi, TestSortingByJoinedNs) { const Variant recentValue = joinItem.GetField(joinQueryRes2.getPayloadType(1).FieldByName(age)); if (!prevValue.Type().Is()) { reindexer::WrSerializer ser; - ASSERT_TRUE(prevValue.Compare(recentValue) <= 0) << (prevValue.Dump(ser), ser << ' ', recentValue.Dump(ser), ser.Slice()); + ASSERT_LE(prevValue.Compare(recentValue), 0) << (prevValue.Dump(ser), ser << ' ', recentValue.Dump(ser), ser.Slice()); } prevValue = recentValue; } @@ -365,12 +368,12 @@ TEST_F(JoinSelectsApi, JoinTestSelectNonIndexedField) { qr); ASSERT_TRUE(err.ok()) << err.what(); - ASSERT_TRUE(qr.Count() == 1) << err.what(); + ASSERT_EQ(qr.Count(), 1); Item theOnlyItem = qr[0].GetItem(false); VariantArray krefs = theOnlyItem[title]; - ASSERT_TRUE(krefs.size() == 1); - ASSERT_TRUE(krefs[0].As() == "Crime and Punishment"); + ASSERT_EQ(krefs.size(), 1); + ASSERT_EQ(krefs[0].As(), "Crime and Punishment"); } TEST_F(JoinSelectsApi, JoinByNonIndexedField) { @@ -400,7 +403,7 @@ TEST_F(JoinSelectsApi, JoinByNonIndexedField) { qr); ASSERT_TRUE(err.ok()) << err.what(); - ASSERT_TRUE(qr.Count() == 1) << err.what(); + ASSERT_EQ(qr.Count(), 1); // And backwards even! 
reindexer::QueryResults qr2; @@ -411,7 +414,7 @@ TEST_F(JoinSelectsApi, JoinByNonIndexedField) { qr2); ASSERT_TRUE(err.ok()) << err.what(); - ASSERT_TRUE(qr2.Count() == 1) << err.what(); + ASSERT_EQ(qr2.Count(), 1); } TEST_F(JoinSelectsApi, JoinsEasyStressTest) { @@ -426,15 +429,15 @@ TEST_F(JoinSelectsApi, JoinsEasyStressTest) { for (size_t i = 0; i < 10; ++i) { reindexer::QueryResults queryRes; Error err = rt.reindexer->Select(orInnerJoinQuery, queryRes); - EXPECT_TRUE(err.ok()) << err.what(); - EXPECT_TRUE(queryRes.Count() > 0); + ASSERT_TRUE(err.ok()) << err.what(); + EXPECT_GT(queryRes.Count(), 0); } }; auto removeTh = [this]() { QueryResults qres; Error err = rt.reindexer->Delete(Query(books_namespace, 0, 10).Where(price, CondGe, 5000), qres); - EXPECT_TRUE(err.ok()) << err.what(); + ASSERT_TRUE(err.ok()) << err.what(); }; int32_t since = 0, count = 1000; @@ -493,22 +496,20 @@ TEST_F(JoinSelectsApi, JoinPreResultStoreValuesOptimizationStressTest) { QueryResults qres; while (!start) std::this_thread::sleep_for(std::chrono::milliseconds(1)); Error err = rt.reindexer->Select(q, qres); - EXPECT_TRUE(err.ok()) << err.what(); + ASSERT_TRUE(err.ok()) << err.what(); }); } start = true; for (auto& th : threads) th.join(); } -static bool checkForAllowedJsonTags(const std::vector& tags, gason::JsonValue jsonValue) { +static void checkForAllowedJsonTags(const std::vector& tags, gason::JsonValue jsonValue) { size_t count = 0; for (const auto& elem : jsonValue) { - if (std::find(tags.begin(), tags.end(), std::string_view(elem.key)) == tags.end()) { - return false; - } + ASSERT_NE(std::find(tags.begin(), tags.end(), std::string_view(elem.key)), tags.end()); ++count; } - return (count == tags.size()); + ASSERT_EQ(count, tags.size()); } TEST_F(JoinSelectsApi, JoinWithSelectFilter) { @@ -525,22 +526,26 @@ TEST_F(JoinSelectsApi, JoinWithSelectFilter) { ASSERT_TRUE(err.ok()) << err.what(); for (auto it : qr) { + ASSERT_TRUE(it.Status().ok()) << it.Status().what(); reindexer::WrSerializer wrser; - it.GetJSON(wrser, false); + err = it.GetJSON(wrser, false); + ASSERT_TRUE(err.ok()) << err.what(); reindexer::joins::ItemIterator joinIt = it.GetJoined(); gason::JsonParser jsonParser; gason::JsonNode root = jsonParser.Parse(reindexer::giftStr(wrser.Slice())); - EXPECT_TRUE(checkForAllowedJsonTags({title, price, "joined_authors_namespace"}, root.value)); + checkForAllowedJsonTags({title, price, "joined_authors_namespace"}, root.value); for (auto fieldIt = joinIt.begin(); fieldIt != joinIt.end(); ++fieldIt) { QueryResults jqr = fieldIt.ToQueryResults(); jqr.addNSContext(qr.getPayloadType(1), qr.getTagsMatcher(1), qr.getFieldsFilter(1), qr.getSchema(1)); for (auto jit : jqr) { + ASSERT_TRUE(jit.Status().ok()) << jit.Status().what(); wrser.Reset(); - jit.GetJSON(wrser, false); + err = jit.GetJSON(wrser, false); + ASSERT_TRUE(err.ok()) << err.what(); root = jsonParser.Parse(reindexer::giftStr(wrser.Slice())); - EXPECT_TRUE(checkForAllowedJsonTags({name, age}, root.value)); + checkForAllowedJsonTags({name, age}, root.value); } } } @@ -573,7 +578,7 @@ TEST_F(JoinSelectsApi, TestMergeWithJoins) { for (auto it : qr) { Item item = it.GetItem(false); auto joined = it.GetJoined(); - ASSERT_TRUE(joined.getJoinedFieldsCount() == 1); + ASSERT_EQ(joined.getJoinedFieldsCount(), 1); bool booksItem = (rowId <= 10000); QueryResults jqr = joined.begin().ToQueryResults(); @@ -585,14 +590,14 @@ TEST_F(JoinSelectsApi, TestMergeWithJoins) { for (auto jit : jqr) { Item jItem = jit.GetItem(false); Variant value = jItem[authorid]; - 
ASSERT_TRUE(value == fkValue); + ASSERT_EQ(value, fkValue); } } else { Variant fkValue = item[locationid_fk]; for (auto jit : jqr) { Item jItem = jit.GetItem(false); Variant value = jItem[locationid]; - ASSERT_TRUE(value == fkValue); + ASSERT_EQ(value, fkValue); } } @@ -634,30 +639,29 @@ TEST_F(JoinSelectsApi, TestNestedMergesInMergesError) { TEST_F(JoinOnConditionsApi, TestGeneralConditions) { const std::string sqlTemplate = R"(select * from books_namespace inner join books_namespace on (books_namespace.authorid_fk = books_namespace.authorid_fk and books_namespace.pages %s books_namespace.pages);)"; - std::vector conditionsSet = {CondLt, CondLe, CondGt, CondGe, CondEq}; - for (size_t i = 0; i < conditionsSet.size(); ++i) { - CondType condition = conditionsSet[i]; + for (CondType condition : {CondLt, CondLe, CondGt, CondGe, CondEq}) { Query queryBooks; queryBooks.FromSQL(GetSql(sqlTemplate, condition)); QueryResults qr; Error err = rt.reindexer->Select(queryBooks, qr); ASSERT_TRUE(err.ok()) << err.what(); for (auto it : qr) { - auto item = it.GetItem(); + const auto item = it.GetItem(); ASSERT_TRUE(item.Status().ok()) << item.Status().what(); - auto joined = it.GetJoined(); - ASSERT_TRUE(joined.getJoinedFieldsCount() == 1); + const Variant authorid1 = item[authorid_fk]; + const Variant pages1 = item[pages]; + const auto joined = it.GetJoined(); + ASSERT_EQ(joined.getJoinedFieldsCount(), 1); QueryResults jqr = joined.begin().ToQueryResults(); jqr.addNSContext(qr.getPayloadType(0), qr.getTagsMatcher(0), qr.getFieldsFilter(0), qr.getSchema(0)); for (auto jit : jqr) { auto joinedItem = jit.GetItem(); ASSERT_TRUE(joinedItem.Status().ok()) << joinedItem.Status().what(); - Variant authorid1 = item[authorid_fk]; Variant authorid2 = joinedItem[authorid_fk]; - ASSERT_TRUE(authorid1 == authorid2); - Variant pages1 = item[pages]; + ASSERT_EQ(authorid1, authorid2); Variant pages2 = joinedItem[pages]; - ASSERT_TRUE(CompareVariants(pages1, pages2, condition)); + ASSERT_TRUE(CompareVariants(pages1, pages2, condition)) + << pages1.As() << ' ' << reindexer::CondTypeToStr(condition) << ' ' << pages2.As(); } } } @@ -691,14 +695,14 @@ TEST_F(JoinOnConditionsApi, TestComparisonConditions) { auto item1 = it1.GetItem(); ASSERT_TRUE(item1.Status().ok()) << item1.Status().what(); auto joined1 = it1.GetJoined(); - ASSERT_TRUE(joined1.getJoinedFieldsCount() == 1); + ASSERT_EQ(joined1.getJoinedFieldsCount(), 1); QueryResults jqr1 = joined1.begin().ToQueryResults(); jqr1.addNSContext(qr1.getPayloadType(1), qr1.getTagsMatcher(1), qr1.getFieldsFilter(1), qr1.getSchema(0)); auto item2 = it2.GetItem(); ASSERT_TRUE(item2.Status().ok()) << item2.Status().what(); auto joined2 = it2.GetJoined(); - ASSERT_TRUE(joined2.getJoinedFieldsCount() == 1); + ASSERT_EQ(joined2.getJoinedFieldsCount(), 1); QueryResults jqr2 = joined2.begin().ToQueryResults(); jqr2.addNSContext(qr2.getPayloadType(1), qr2.getTagsMatcher(1), qr2.getFieldsFilter(1), qr2.getSchema(0)); @@ -748,8 +752,10 @@ TEST_F(JoinOnConditionsApi, TestLeftJoinOnCondSet) { ASSERT_EQ(qr.Count(), results.size()); int k = 0; for (auto it = qr.begin(); it != qr.end(); ++it, ++k) { + ASSERT_TRUE(it.Status().ok()) << it.Status().what(); reindexer::WrSerializer ser; - it.GetJSON(ser, false); + err = it.GetJSON(ser, false); + ASSERT_TRUE(err.ok()) << err.what(); ASSERT_EQ(ser.c_str(), results[k]); } }; @@ -787,7 +793,7 @@ TEST_F(JoinOnConditionsApi, TestInvalidConditions) { } QueryResults qr; Error err = rt.reindexer->Select(Query(books_namespace).InnerJoin(authorid_fk, authorid, 
CondAllSet, Query(authors_namespace)), qr); - EXPECT_TRUE(!err.ok()); + EXPECT_FALSE(err.ok()); err = rt.reindexer->Select(Query(books_namespace).InnerJoin(authorid_fk, authorid, CondLike, Query(authors_namespace)), qr); - EXPECT_TRUE(!err.ok()); + EXPECT_FALSE(err.ok()); } diff --git a/cpp_src/gtests/tests/unit/namespace_test.cc b/cpp_src/gtests/tests/unit/namespace_test.cc index 4de15ddae..e8bffaa96 100644 --- a/cpp_src/gtests/tests/unit/namespace_test.cc +++ b/cpp_src/gtests/tests/unit/namespace_test.cc @@ -339,12 +339,12 @@ TEST_F(NsApi, QueryperfstatsNsDummyTest) { QueryResults qr; err = rt.reindexer->Select(Query("#queriesperfstats"), qr); ASSERT_TRUE(err.ok()) << err.what(); - ASSERT_TRUE(qr.Count() > 0) << "#queriesperfstats table is empty!"; + ASSERT_GT(qr.Count(), 0) << "#queriesperfstats table is empty!"; for (size_t i = 0; i < qr.Count(); ++i) { std::cout << qr[i].GetItem(false).GetJSON() << std::endl; } } - ASSERT_TRUE(qres.Count() == 1) << "Expected 1 row for this query, got " << qres.Count(); + ASSERT_EQ(qres.Count(), 1); Item item = qres[0].GetItem(false); Variant val; val = item["latency_stddev"]; @@ -415,12 +415,12 @@ TEST_F(NsApi, TestUpdateNonindexedField) { Query updateQuery{Query(default_namespace).Where("id", CondGe, Variant("1500")).Set("nested.bonus", static_cast(100500))}; Error err = rt.reindexer->Update(updateQuery, qrUpdate); ASSERT_TRUE(err.ok()) << err.what(); - ASSERT_TRUE(qrUpdate.Count() == 500) << qrUpdate.Count(); + ASSERT_EQ(qrUpdate.Count(), 500); QueryResults qrAll; err = rt.reindexer->Select(Query(default_namespace).Where("id", CondGe, Variant("1500")), qrAll); ASSERT_TRUE(err.ok()) << err.what(); - ASSERT_TRUE(qrAll.Count() == 500) << qrAll.Count(); + ASSERT_EQ(qrAll.Count(), 500); for (auto it : qrAll) { Item item = it.GetItem(false); @@ -439,12 +439,12 @@ TEST_F(NsApi, TestUpdateSparseField) { Query updateQuery{Query(default_namespace).Where("id", CondGe, Variant("1500")).Set("sparse_field", static_cast(100500))}; Error err = rt.reindexer->Update(updateQuery, qrUpdate); ASSERT_TRUE(err.ok()) << err.what(); - ASSERT_TRUE(qrUpdate.Count() == 500) << qrUpdate.Count(); + ASSERT_EQ(qrUpdate.Count(), 500); QueryResults qrAll; err = rt.reindexer->Select(Query(default_namespace).Where("id", CondGe, Variant("1500")), qrAll); ASSERT_TRUE(err.ok()) << err.what(); - ASSERT_TRUE(qrAll.Count() == 500) << qrAll.Count(); + ASSERT_EQ(qrAll.Count(), 500); for (auto it : qrAll) { Item item = it.GetItem(false); @@ -473,7 +473,7 @@ TEST_F(NsApi, TestUpdateTwoFields) { // Make sure query worked well ASSERT_TRUE(err.ok()) << err.what(); - ASSERT_TRUE(qrUpdate.Count() == 1) << qrUpdate.Count(); + ASSERT_EQ(qrUpdate.Count(), 1); // Make sure: // 1. 
JSON of the item is correct @@ -500,12 +500,12 @@ static void updateArrayField(const std::shared_ptr &reinde Query updateQuery{Query(ns).Where("id", CondGe, Variant("500")).Set(updateFieldPath, values)}; Error err = reindexer->Update(updateQuery, qrUpdate); ASSERT_TRUE(err.ok()) << err.what(); - ASSERT_TRUE(qrUpdate.Count() > 0) << qrUpdate.Count(); + ASSERT_GT(qrUpdate.Count(), 0); QueryResults qrAll; err = reindexer->Select(Query(ns).Where("id", CondGe, Variant("500")), qrAll); ASSERT_TRUE(err.ok()) << err.what(); - ASSERT_TRUE(qrAll.Count() == qrUpdate.Count()) << qrAll.Count(); + ASSERT_EQ(qrAll.Count(), qrUpdate.Count()); for (auto it : qrAll) { Item item = it.GetItem(false); @@ -537,7 +537,7 @@ TEST_F(NsApi, TestUpdateNonindexedArrayField2) { QueryResults qr; Error err = rt.reindexer->Select(R"(update test_namespace set nested.bonus=[{"first":1,"second":2,"third":3}] where id = 1000;)", qr); ASSERT_TRUE(err.ok()) << err.what(); - ASSERT_TRUE(qr.Count() == 1) << qr.Count(); + ASSERT_EQ(qr.Count(), 1); Item item = qr[0].GetItem(false); std::string_view json = item.GetJSON(); @@ -553,7 +553,7 @@ TEST_F(NsApi, TestUpdateNonindexedArrayField3) { Error err = rt.reindexer->Select(R"(update test_namespace set nested.bonus=[{"id":1},{"id":2},{"id":3},{"id":4}] where id = 1000;)", qr); ASSERT_TRUE(err.ok()) << err.what(); - ASSERT_TRUE(qr.Count() == 1) << qr.Count(); + ASSERT_EQ(qr.Count(), 1); Item item = qr[0].GetItem(false); VariantArray val = item["nested.bonus"]; @@ -576,12 +576,12 @@ TEST_F(NsApi, TestUpdateNonindexedArrayField4) { QueryResults qr; Error err = rt.reindexer->Select(R"(update test_namespace set nested.bonus=[0] where id = 1000;)", qr); ASSERT_TRUE(err.ok()) << err.what(); - ASSERT_TRUE(qr.Count() == 1) << qr.Count(); + ASSERT_EQ(qr.Count(), 1); Item item = qr[0].GetItem(false); std::string_view json = item.GetJSON(); size_t pos = json.find(R"("nested":{"bonus":[0])"); - ASSERT_TRUE(pos != std::string::npos) << "'nested.bonus' was not updated properly" << json; + ASSERT_NE(pos, std::string::npos) << "'nested.bonus' was not updated properly" << json; } TEST_F(NsApi, TestUpdateNonindexedArrayField5) { @@ -611,12 +611,12 @@ TEST_F(NsApi, TestUpdateIndexedArrayField2) { Query q{Query(default_namespace).Where(idIdxName, CondEq, static_cast(1000)).Set(indexedArrayField, std::move(value.MarkArray()))}; Error err = rt.reindexer->Update(q, qr); ASSERT_TRUE(err.ok()) << err.what(); - ASSERT_TRUE(qr.Count() == 1) << qr.Count(); + ASSERT_EQ(qr.Count(), 1); Item item = qr[0].GetItem(false); std::string_view json = item.GetJSON(); size_t pos = json.find(R"("indexed_array_field":[77])"); - ASSERT_TRUE(pos != std::string::npos) << "'indexed_array_field' was not updated properly" << json; + ASSERT_NE(pos, std::string::npos) << "'indexed_array_field' was not updated properly" << json; } static void addAndSetNonindexedField(const std::shared_ptr &reindexer, const std::string &ns, @@ -844,6 +844,7 @@ TEST_F(NsApi, DropArrayField3) { DropArrayItem(rt.reindexer, default_namespace, "nested.nested_array[*].prices[*]", "nested.nested_array.prices"); } +#if (0) // #1500 TEST_F(NsApi, DropArrayField4) { // 1. Define NS // 2. Fill NS @@ -853,6 +854,7 @@ TEST_F(NsApi, DropArrayField4) { DropArrayItem(rt.reindexer, default_namespace, "nested.nested_array[0].prices[((2+4)*2)/6]", "nested.nested_array.prices", 0, ((2 + 4) * 2) / 6); } +#endif TEST_F(NsApi, SetArrayFieldWithSql) { // 1. 
Define NS @@ -1692,12 +1694,12 @@ static void checkFieldConversion(const std::shared_ptr &re ASSERT_TRUE(!err.ok()); } else { ASSERT_TRUE(err.ok()) << err.what(); - ASSERT_TRUE(qrUpdate.Count() > 0) << qrUpdate.Count(); + ASSERT_GT(qrUpdate.Count(), 0); QueryResults qrAll; err = reindexer->Select(selectQuery, qrAll); ASSERT_TRUE(err.ok()) << err.what(); - ASSERT_TRUE(qrAll.Count() == qrUpdate.Count()) << qrAll.Count(); + ASSERT_EQ(qrAll.Count(), qrUpdate.Count()); for (auto it : qrAll) { Item item = it.GetItem(false); @@ -1845,7 +1847,7 @@ TEST_F(NsApi, TestUpdatePkFieldNoConditions) { QueryResults qr; Error err = rt.reindexer->Select("update test_namespace set id = id + 1;", qr); ASSERT_TRUE(err.ok()) << err.what(); - ASSERT_TRUE(qr.Count() > 0); + ASSERT_GT(qr.Count(), 0); int i = 1; for (auto &it : qr) { @@ -1862,7 +1864,7 @@ TEST_F(NsApi, TestUpdateIndexArrayWithNull) { QueryResults qr; Error err = rt.reindexer->Select("update test_namespace set indexed_array_field = null where id = 1;", qr); ASSERT_TRUE(err.ok()) << err.what(); - ASSERT_TRUE(qr.Count() == 1); + ASSERT_EQ(qr.Count(), 1); for (auto &it : qr) { Item item = it.GetItem(false); @@ -1983,7 +1985,7 @@ TEST_F(NsApi, TestUpdateNonIndexFieldWithNull) { QueryResults qr; Error err = rt.reindexer->Select("update test_namespace set extra = null where id = 1001;", qr); ASSERT_TRUE(err.ok()) << err.what(); - ASSERT_TRUE(qr.Count() == 1); + ASSERT_EQ(qr.Count(), 1); for (auto &it : qr) { Item item = it.GetItem(false); @@ -2008,7 +2010,7 @@ TEST_F(NsApi, TestUpdateEmptyArrayField) { QueryResults qr; Error err = rt.reindexer->Select("update test_namespace set indexed_array_field = [] where id = 1;", qr); ASSERT_TRUE(err.ok()) << err.what(); - ASSERT_TRUE(qr.Count() == 1); + ASSERT_EQ(qr.Count(), 1); Item item = qr[0].GetItem(false); Variant idFieldVal = item[idIdxName]; @@ -2070,12 +2072,12 @@ TEST_F(NsApi, TestUpdateEmptyIndexedField) { .Set(indexedArrayField, {Variant(static_cast(4)), Variant(static_cast(5)), Variant(static_cast(6))}); Error err = rt.reindexer->Update(q, qr); ASSERT_TRUE(err.ok()) << err.what(); - ASSERT_TRUE(qr.Count() == 1); + ASSERT_EQ(qr.Count(), 1); QueryResults qr2; err = rt.reindexer->Select("select * from test_namespace where id = 1001;", qr2); ASSERT_TRUE(err.ok()) << err.what(); - ASSERT_TRUE(qr2.Count() == 1); + ASSERT_EQ(qr2.Count(), 1); for (auto it : qr2) { Item item = it.GetItem(false); @@ -2100,7 +2102,7 @@ TEST_F(NsApi, TestDropField) { QueryResults qr; Error err = rt.reindexer->Select("update test_namespace drop extra where id >= 1000 and id < 1010;", qr); ASSERT_TRUE(err.ok()) << err.what(); - ASSERT_TRUE(qr.Count() == 10) << qr.Count(); + ASSERT_EQ(qr.Count(), 10); for (auto it : qr) { Item item = it.GetItem(false); @@ -2112,7 +2114,7 @@ TEST_F(NsApi, TestDropField) { QueryResults qr2; err = rt.reindexer->Select("update test_namespace drop nested.bonus where id >= 1005 and id < 1010;", qr2); ASSERT_TRUE(err.ok()) << err.what(); - ASSERT_TRUE(qr2.Count() == 5); + ASSERT_EQ(qr2.Count(), 5); for (auto it : qr2) { Item item = it.GetItem(false); @@ -2144,7 +2146,7 @@ TEST_F(NsApi, TestUpdateFieldWithFunction) { Error err = rt.reindexer->Select( "update test_namespace set int_field = SERIAL(), extra = SERIAL(), nested.timeField = NOW(msec) where id >= 0;", qr); ASSERT_TRUE(err.ok()) << err.what(); - ASSERT_TRUE(qr.Count() > 0); + ASSERT_GT(qr.Count(), 0); int i = 1; for (auto &it : qr) { @@ -2168,7 +2170,7 @@ TEST_F(NsApi, TestUpdateFieldWithExpressions) { "0;", qr); ASSERT_TRUE(err.ok()) << err.what(); - 
ASSERT_TRUE(qr.Count() > 0) << qr.Count(); + ASSERT_GT(qr.Count(), 0); int i = 1; for (auto &it : qr) { @@ -2207,22 +2209,22 @@ static void checkQueryDsl(const Query &src) { } } if (objectValues) { - EXPECT_TRUE(src.entries == dst.entries); - EXPECT_TRUE(src.aggregations_ == dst.aggregations_); - EXPECT_TRUE(src._namespace == dst._namespace); - EXPECT_TRUE(src.sortingEntries_ == dst.sortingEntries_); - EXPECT_TRUE(src.calcTotal == dst.calcTotal); - EXPECT_TRUE(src.start == dst.start); - EXPECT_TRUE(src.count == dst.count); - EXPECT_TRUE(src.debugLevel == dst.debugLevel); - EXPECT_TRUE(src.strictMode == dst.strictMode); - EXPECT_TRUE(src.forcedSortOrder_ == dst.forcedSortOrder_); - EXPECT_TRUE(src.selectFilter_ == dst.selectFilter_); - EXPECT_TRUE(src.selectFunctions_ == dst.selectFunctions_); - EXPECT_TRUE(src.joinQueries_ == dst.joinQueries_); - EXPECT_TRUE(src.mergeQueries_ == dst.mergeQueries_); + EXPECT_EQ(src.entries, dst.entries); + EXPECT_EQ(src.aggregations_, dst.aggregations_); + EXPECT_EQ(src.NsName(), dst.NsName()); + EXPECT_EQ(src.sortingEntries_, dst.sortingEntries_); + EXPECT_EQ(src.CalcTotal(), dst.CalcTotal()); + EXPECT_EQ(src.Offset(), dst.Offset()); + EXPECT_EQ(src.Limit(), dst.Limit()); + EXPECT_EQ(src.debugLevel, dst.debugLevel); + EXPECT_EQ(src.strictMode, dst.strictMode); + EXPECT_EQ(src.forcedSortOrder_, dst.forcedSortOrder_); + EXPECT_EQ(src.selectFilter_, dst.selectFilter_); + EXPECT_EQ(src.selectFunctions_, dst.selectFunctions_); + EXPECT_EQ(src.joinQueries_, dst.joinQueries_); + EXPECT_EQ(src.mergeQueries_, dst.mergeQueries_); } else { - EXPECT_TRUE(dst == src); + EXPECT_EQ(dst, src); } } @@ -2379,12 +2381,13 @@ TEST_F(NsApi, MsgPackEncodingTest) { ASSERT_TRUE(err.ok()) << err.what(); std::string json(item.GetJSON()); - ASSERT_TRUE(json == items[i++]); + ASSERT_EQ(json, items[i++]); } reindexer::WrSerializer wrSer3; for (size_t i = 0; i < qr.Count(); ++i) { - qr[i].GetMsgPack(wrSer3, false); + const auto err = qr[i].GetMsgPack(wrSer3, false); + ASSERT_TRUE(err.ok()) << err.what(); } i = 0; @@ -2397,7 +2400,7 @@ TEST_F(NsApi, MsgPackEncodingTest) { ASSERT_TRUE(err.ok()) << err.what(); std::string json(item.GetJSON()); - ASSERT_TRUE(json == items[i++]); + ASSERT_EQ(json, items[i++]); } } diff --git a/cpp_src/gtests/tests/unit/queries_test.cc b/cpp_src/gtests/tests/unit/queries_test.cc index 5a9ac6eb7..725fa47d6 100644 --- a/cpp_src/gtests/tests/unit/queries_test.cc +++ b/cpp_src/gtests/tests/unit/queries_test.cc @@ -32,7 +32,7 @@ TEST_F(QueriesApi, QueriesStandardTestSet) { auto& items = insertedItems_[default_namespace]; for (auto it = items.begin(); it != items.end();) { Error err = rt.reindexer->Delete(default_namespace, it->second); - EXPECT_TRUE(err.ok()) << err.what(); + ASSERT_TRUE(err.ok()) << err.what(); it = items.erase(it); if (++itemsCount == 4000) break; } @@ -43,7 +43,7 @@ TEST_F(QueriesApi, QueriesStandardTestSet) { itemsCount = 0; for (auto it = items.begin(); it != items.end();) { Error err = rt.reindexer->Delete(default_namespace, it->second); - EXPECT_TRUE(err.ok()) << err.what(); + ASSERT_TRUE(err.ok()) << err.what(); it = items.erase(it); if (++itemsCount == 5000) break; } @@ -52,7 +52,7 @@ TEST_F(QueriesApi, QueriesStandardTestSet) { auto itToRemove = items.begin(); if (itToRemove != items.end()) { Error err = rt.reindexer->Delete(default_namespace, itToRemove->second); - EXPECT_TRUE(err.ok()) << err.what(); + ASSERT_TRUE(err.ok()) << err.what(); items.erase(itToRemove); } FillDefaultNamespace(rand() % 100, 1, 0); @@ -62,7 +62,7 @@ 
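// Note on the assertion changes above: gtest's typed macros (ASSERT_EQ, ASSERT_GT, ...) print
// both operands when they fail, whereas ASSERT_TRUE(a == b) only reports that the boolean
// expression was false. A minimal, standalone gtest sketch of the difference (intentionally
// failing, not reindexer code; only <gtest/gtest.h> is assumed):
#include <gtest/gtest.h>

TEST(AssertionStyleSketch, TypedMacrosShowOperands) {
	const size_t expected = 10;
	const size_t actual = 7;
	// Failure message includes both values: "expected ... 10" vs "actual ... 7".
	EXPECT_EQ(expected, actual);
	// Failure message only says: "Value of: expected == actual  Actual: false".
	EXPECT_TRUE(expected == actual);
}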
TEST_F(QueriesApi, QueriesStandardTestSet) { std::advance(itToRemove, rand() % std::min(100, int(items.size()))); if (itToRemove != items.end()) { Error err = rt.reindexer->Delete(default_namespace, itToRemove->second); - EXPECT_TRUE(err.ok()) << err.what(); + ASSERT_TRUE(err.ok()) << err.what(); items.erase(itToRemove); } } @@ -70,7 +70,7 @@ TEST_F(QueriesApi, QueriesStandardTestSet) { for (auto it = items.begin(); it != items.end();) { Error err = rt.reindexer->Delete(default_namespace, it->second); - EXPECT_TRUE(err.ok()) << err.what(); + ASSERT_TRUE(err.ok()) << err.what(); it = items.erase(it); } @@ -105,6 +105,18 @@ TEST_F(QueriesApi, QueriesConditions) { CheckConditions(); } +#if !defined(REINDEX_WITH_TSAN) +TEST_F(QueriesApi, UuidQueries) { + FillUUIDNs(); + // hack to obtain not index not string uuid fields + /*auto err = rt.reindexer->DropIndex(uuidNs, {kFieldNameUuidNotIndex2}); // TODO uncomment this #1470 + ASSERT_TRUE(err.ok()) << err.what(); + err = rt.reindexer->DropIndex(uuidNs, {kFieldNameUuidNotIndex3}); + ASSERT_TRUE(err.ok()) << err.what();*/ + CheckUUIDQueries(); +} +#endif + TEST_F(QueriesApi, IndexCacheInvalidationTest) { std::vector> data{{0, 10}, {1, 9}, {2, 8}, {3, 7}, {4, 6}, {5, 5}, {6, 4}, {7, 3}, {8, 2}, {9, 1}, {10, 0}, {11, -1}}; @@ -340,15 +352,15 @@ TEST_F(QueriesApi, StrictModeTest) { const std::string kNotExistingField = "some_random_name123"; QueryResults qr; { - Query query = Query(testSimpleNs).Where(kNotExistingField, CondEmpty, 0); + Query query = Query(testSimpleNs).Where(kNotExistingField, CondEmpty, {}); Error err = rt.reindexer->Select(query.Strict(StrictModeNames), qr); - EXPECT_EQ(err.code(), errParams); + EXPECT_EQ(err.code(), errQueryExec); qr.Clear(); err = rt.reindexer->Select(query.Strict(StrictModeIndexes), qr); - EXPECT_EQ(err.code(), errParams); + EXPECT_EQ(err.code(), errQueryExec); qr.Clear(); err = rt.reindexer->Select(query.Strict(StrictModeNone), qr); - EXPECT_TRUE(err.ok()) << err.what(); + ASSERT_TRUE(err.ok()) << err.what(); Verify(qr, Query(testSimpleNs), *rt.reindexer); qr.Clear(); } @@ -356,13 +368,13 @@ TEST_F(QueriesApi, StrictModeTest) { { Query query = Query(testSimpleNs).Where(kNotExistingField, CondEq, 0); Error err = rt.reindexer->Select(query.Strict(StrictModeNames), qr); - EXPECT_EQ(err.code(), errParams); + EXPECT_EQ(err.code(), errQueryExec); qr.Clear(); err = rt.reindexer->Select(query.Strict(StrictModeIndexes), qr); - EXPECT_EQ(err.code(), errParams); + EXPECT_EQ(err.code(), errQueryExec); qr.Clear(); err = rt.reindexer->Select(query.Strict(StrictModeNone), qr); - EXPECT_TRUE(err.ok()) << err.what(); + ASSERT_TRUE(err.ok()) << err.what(); EXPECT_EQ(qr.Count(), 0); } } @@ -453,6 +465,7 @@ TEST_F(QueriesApi, JoinByNotIndexField) { ASSERT_EQ(qr.Count(), sizeof(expectedIds) / sizeof(int)); for (size_t i = 0; i < qr.Count(); ++i) { Item item = qr[i].GetItem(false); + ASSERT_TRUE(item.Status().ok()) << item.Status().what(); VariantArray values = item["id"]; ASSERT_EQ(values.size(), 1); EXPECT_EQ(values[0].As(), expectedIds[i]); @@ -807,14 +820,18 @@ TEST_F(QueriesApi, ConvertationStringToDoubleDuringSorting) { std::string print(const reindexer::Query& q, reindexer::QueryResults::Iterator& currIt, reindexer::QueryResults::Iterator& prevIt, const reindexer::QueryResults& qr) { + assertrx(currIt.Status().ok()); std::string res = '\n' + q.GetSQL() + "\ncurr: "; reindexer::WrSerializer ser; - currIt.GetJSON(ser, false); + const auto err = currIt.GetJSON(ser, false); + assertrx(err.ok()); res += ser.Slice(); if (prevIt != 
qr.end()) { + assertrx(prevIt.Status().ok()); res += "\nprev: "; ser.Reset(); - prevIt.GetJSON(ser, false); + const auto err = prevIt.GetJSON(ser, false); + assertrx(err.ok()); res += ser.Slice(); } return res; diff --git a/cpp_src/gtests/tests/unit/replication_master_master_test.cc b/cpp_src/gtests/tests/unit/replication_master_master_test.cc index 96aa46cb4..b620b6109 100644 --- a/cpp_src/gtests/tests/unit/replication_master_master_test.cc +++ b/cpp_src/gtests/tests/unit/replication_master_master_test.cc @@ -431,6 +431,105 @@ TEST_F(ReplicationSlaveSlaveApi, TransactionTest) { for (auto& node : nodes) node.Stop(); } +TEST_F(ReplicationSlaveSlaveApi, TransactionCopyPolicyForceSync) { + // Check transactions copy policy after force sync + /* + m + | + 1 + | + 2 + */ + constexpr std::string_view kJsonCfgNss = R"=({ + "namespaces": [ + { + "namespace": "*", + "start_copy_policy_tx_size": 10000, + "copy_policy_multiplier": 5, + "tx_size_to_always_copy": 100000 + }, + { + "namespace": "ns1", + "start_copy_policy_tx_size": 10000, + "copy_policy_multiplier": 5, + "tx_size_to_always_copy": 1 + } + ], + "type": "namespaces" + })="; + constexpr int port = 9999; + const std::string kBaseDbPath(fs::JoinPath(kBaseTestsetDbPath, "TransactionCopyPolicyForceSync")); + const std::string kDbPathMaster(kBaseDbPath + "/test_"); + constexpr int serverId = 5; + constexpr size_t kRows = 100; + const std::string nsName("ns1"); + + std::vector slaveConfiguration = {-1, port, port + 1}; + std::vector nodes; + for (size_t i = 0; i < slaveConfiguration.size(); i++) { + nodes.emplace_back(); + nodes.back().InitServer(i, port + i, port + 1000 + i, kDbPathMaster + std::to_string(i), "db", true); + nodes.back().Get()->EnableAllProfilings(); + } + + // Set tx copy policy for the node '2' to 'always copy' + { + auto item = nodes[2].Get()->api.reindexer->NewItem("#config"); + ASSERT_TRUE(item.Status().ok()) << item.Status().what(); + auto err = item.FromJSON(kJsonCfgNss); + ASSERT_TRUE(err.ok()) << err.what(); + err = nodes[2].Get()->api.reindexer->Upsert("#config", item); + ASSERT_TRUE(err.ok()) << err.what(); + } + + for (size_t i = 0; i < slaveConfiguration.size(); i++) { + if (i == 0) { + ReplicationConfigTest config("master"); + nodes[i].Get()->MakeMaster(config); + } else { + std::string masterDsn = "cproto://127.0.0.1:" + std::to_string(slaveConfiguration[i]) + "/db"; + ReplicationConfigTest config("slave", false, true, serverId + i, masterDsn); + nodes[i].Get()->MakeSlave(slaveConfiguration[i], config); + } + } + nodes[2].Drop(); + + ServerControl& master = nodes[0]; + TestNamespace1 ns1(master, nsName); + WaitSync(nodes[0], nodes[1], nsName); + + // Restart node '2' + nodes[2].InitServer(2, port + 2, port + 1000 + 2, kDbPathMaster + std::to_string(2), "db", true); + std::string masterDsn = "cproto://127.0.0.1:" + std::to_string(slaveConfiguration[2]) + "/db"; + ReplicationConfigTest config("slave", false, true, serverId + 2, masterDsn); + nodes[2].Get()->MakeSlave(1, config); + WaitSync(nodes[0], nodes[2], nsName); + + // Apply tx + reindexer::client::SyncCoroTransaction tr = master.Get()->api.reindexer->NewTransaction(nsName); + for (unsigned int i = 0; i < kRows; i++) { + reindexer::client::Item item = tr.NewItem(); + auto err = item.FromJSON("{\"id\":" + std::to_string(i + kRows * 10) + "}"); + tr.Upsert(std::move(item)); + } + master.Get()->api.reindexer->CommitTransaction(tr); + WaitSync(nodes[0], nodes[2], nsName); + + // Check copy tx event in the perfstats + client::SyncCoroQueryResults 
qr(nodes[2].Get()->api.reindexer.get()); + auto err = nodes[2].Get()->api.reindexer->Select("select * from #perfstats", qr); + ASSERT_TRUE(err.ok()) << err.what(); + ASSERT_EQ(qr.Count(), 1); + WrSerializer ser; + err = qr.begin().GetJSON(ser, false); + ASSERT_TRUE(err.ok()) << err.what(); + gason::JsonParser parser; + auto resJS = parser.Parse(ser.Slice()); + ASSERT_EQ(resJS["transactions"]["total_copy_count"].As(-1), 1) << ser.Slice(); + + for (auto& node : nodes) node.Stop(); +} + TEST_F(ReplicationSlaveSlaveApi, ForceSync3Node) { // Check force-sync for cascade setup /* diff --git a/cpp_src/gtests/tests/unit/rpcclient_test.cc b/cpp_src/gtests/tests/unit/rpcclient_test.cc index a6c1027ee..9ea0b80b9 100644 --- a/cpp_src/gtests/tests/unit/rpcclient_test.cc +++ b/cpp_src/gtests/tests/unit/rpcclient_test.cc @@ -20,11 +20,11 @@ TEST_F(RPCClientTestApi, ConnectTimeout) { config.RequestTimeout = seconds(5); reindexer::client::Reindexer rx(config); auto res = rx.Connect(std::string("cproto://") + kDefaultRPCServerAddr + "/test_db"); - EXPECT_TRUE(res.ok()) << res.what(); + ASSERT_TRUE(res.ok()) << res.what(); res = rx.AddNamespace(reindexer::NamespaceDef("MyNamespace")); EXPECT_EQ(res.code(), errTimeout); res = StopServer(); - EXPECT_TRUE(res.ok()) << res.what(); + ASSERT_TRUE(res.ok()) << res.what(); } TEST_F(RPCClientTestApi, RequestTimeout) { @@ -35,14 +35,14 @@ TEST_F(RPCClientTestApi, RequestTimeout) { config.RequestTimeout = seconds(3); reindexer::client::Reindexer rx(config); auto res = rx.Connect(std::string("cproto://") + kDefaultRPCServerAddr + "/test_db"); - EXPECT_TRUE(res.ok()) << res.what(); + ASSERT_TRUE(res.ok()) << res.what(); const std::string kNamespaceName = "MyNamespace"; res = rx.AddNamespace(reindexer::NamespaceDef(kNamespaceName)); EXPECT_EQ(res.code(), errTimeout); res = rx.DropNamespace(kNamespaceName); - EXPECT_TRUE(res.ok()) << res.what(); + ASSERT_TRUE(res.ok()) << res.what(); res = StopServer(); - EXPECT_TRUE(res.ok()) << res.what(); + ASSERT_TRUE(res.ok()) << res.what(); } TEST_F(RPCClientTestApi, RequestCancels) { @@ -50,7 +50,7 @@ TEST_F(RPCClientTestApi, RequestCancels) { StartServer(); reindexer::client::Reindexer rx; auto res = rx.Connect(std::string("cproto://") + kDefaultRPCServerAddr + "/test_db"); - EXPECT_TRUE(res.ok()) << res.what(); + ASSERT_TRUE(res.ok()) << res.what(); { CancelRdxContext ctx; @@ -72,7 +72,7 @@ TEST_F(RPCClientTestApi, RequestCancels) { } res = StopServer(); - EXPECT_TRUE(res.ok()) << res.what(); + ASSERT_TRUE(res.ok()) << res.what(); } TEST_F(RPCClientTestApi, SuccessfullRequestWithTimeout) { @@ -83,22 +83,23 @@ TEST_F(RPCClientTestApi, SuccessfullRequestWithTimeout) { config.RequestTimeout = seconds(6); reindexer::client::Reindexer rx(config); auto res = rx.Connect(std::string("cproto://") + kDefaultRPCServerAddr + "/test_db"); - EXPECT_TRUE(res.ok()) << res.what(); + ASSERT_TRUE(res.ok()) << res.what(); res = rx.AddNamespace(reindexer::NamespaceDef("MyNamespace")); - EXPECT_TRUE(res.ok()) << res.what(); + ASSERT_TRUE(res.ok()) << res.what(); res = StopServer(); - EXPECT_TRUE(res.ok()) << res.what(); + ASSERT_TRUE(res.ok()) << res.what(); } TEST_F(RPCClientTestApi, ErrorLoginResponse) { AddFakeServer(); StartServer(kDefaultRPCServerAddr, errForbidden); reindexer::client::Reindexer rx; - rx.Connect(std::string("cproto://") + kDefaultRPCServerAddr + "/test_db"); - auto res = rx.AddNamespace(reindexer::NamespaceDef("MyNamespace")); - EXPECT_EQ(res.code(), errForbidden) << res.what(); - res = StopServer(); - EXPECT_TRUE(res.ok()) << 
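// Note on the EXPECT_TRUE -> ASSERT_TRUE changes above: in gtest, EXPECT_* failures are
// non-fatal (the test body keeps running), while ASSERT_* failures return from the test
// immediately. Using ASSERT_* for setup and preconditions avoids cascading failures on data
// that was never created. A standalone sketch of the idea (hypothetical helper, not
// reindexer code):
#include <gtest/gtest.h>
#include <vector>

static std::vector<int> LoadRows(bool ok) { return ok ? std::vector<int>{1, 2, 3} : std::vector<int>{}; }

TEST(FatalVsNonFatalSketch, AssertStopsEarly) {
	const auto rows = LoadRows(true);
	// Precondition: if this fails, nothing below makes sense, so stop the test here.
	ASSERT_FALSE(rows.empty());
	// Property under test: a non-fatal check is fine once the preconditions hold.
	EXPECT_EQ(rows.front(), 1);
}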
res.what(); + auto err = rx.Connect(std::string("cproto://") + kDefaultRPCServerAddr + "/test_db"); + ASSERT_TRUE(err.ok()) << err.what(); + err = rx.AddNamespace(reindexer::NamespaceDef("MyNamespace")); + EXPECT_EQ(err.code(), errForbidden) << err.what(); + err = StopServer(); + ASSERT_TRUE(err.ok()) << err.what(); } TEST_F(RPCClientTestApi, SeveralDsnReconnect) { @@ -126,7 +127,7 @@ TEST_F(RPCClientTestApi, SeveralDsnReconnect) { connectData.emplace_back(std::string().append(cprotoIdentifier).append(uri).append(dbName), reindexer::client::ConnectOpts()); } auto res = rx.Connect(connectData); - EXPECT_TRUE(res.ok()) << res.what(); + ASSERT_TRUE(res.ok()) << res.what(); for (size_t i = 0; i < 100; ++i) { if (CheckIfFakeServerConnected(uris[0])) break; @@ -147,7 +148,7 @@ TEST_F(RPCClientTestApi, SeveralDsnReconnect) { if (res.ok()) break; std::this_thread::sleep_for(std::chrono::milliseconds(100)); } - EXPECT_TRUE(res.ok()) << res.what(); + ASSERT_TRUE(res.ok()) << res.what(); } StopAllServers(); } @@ -209,7 +210,7 @@ TEST_F(RPCClientTestApi, SelectFromClosedNamespace) { loop.run(); ASSERT_TRUE(finished); Error err = StopServer(); - EXPECT_TRUE(err.ok()) << err.what(); + ASSERT_TRUE(err.ok()) << err.what(); } TEST_F(RPCClientTestApi, RenameNamespace) { @@ -288,7 +289,7 @@ TEST_F(RPCClientTestApi, RenameNamespace) { loop.run(); ASSERT_TRUE(finished); Error err = StopServer(); - EXPECT_TRUE(err.ok()) << err.what(); + ASSERT_TRUE(err.ok()) << err.what(); } TEST_F(RPCClientTestApi, CoroRequestTimeout) { @@ -305,19 +306,19 @@ TEST_F(RPCClientTestApi, CoroRequestTimeout) { config.RequestTimeout = seconds(1); reindexer::client::CoroReindexer rx(config); auto err = rx.Connect(std::string("cproto://") + kDefaultRPCServerAddr + "/test_db", loop); - EXPECT_TRUE(err.ok()) << err.what(); + ASSERT_TRUE(err.ok()) << err.what(); const std::string kNamespaceName = "MyNamespace"; err = rx.AddNamespace(reindexer::NamespaceDef(kNamespaceName)); EXPECT_EQ(err.code(), errTimeout); loop.sleep(std::chrono::seconds(4)); err = rx.DropNamespace(kNamespaceName); - EXPECT_TRUE(err.ok()) << err.what(); + ASSERT_TRUE(err.ok()) << err.what(); finished = true; }); loop.run(); ASSERT_TRUE(finished); Error err = StopServer(); - EXPECT_TRUE(err.ok()) << err.what(); + ASSERT_TRUE(err.ok()) << err.what(); } static std::chrono::seconds GetMaxTimeForCoroSelectTimeout(unsigned requests, std::chrono::seconds delay) { @@ -391,16 +392,16 @@ TEST_F(RPCClientTestApi, CoroSelectTimeout) { [&] { return server.CloseQRRequestsCount() >= kCorCount * kQueriesCount; }); EXPECT_EQ(server.CloseQRRequestsCount(), kCorCount * kQueriesCount); err = rx.AddNamespace(reindexer::NamespaceDef(kNamespaceName + std::to_string(index))); - EXPECT_TRUE(err.ok()) << err.what(); + ASSERT_TRUE(err.ok()) << err.what(); finished[index] = true; }); } loop.run(); for (size_t i = 0; i < kCorCount; ++i) { - EXPECT_TRUE(finished[i]); + ASSERT_TRUE(finished[i]); } Error const err = StopServer(); - EXPECT_TRUE(err.ok()) << err.what(); + ASSERT_TRUE(err.ok()) << err.what(); } TEST_F(RPCClientTestApi, CoroRequestCancels) { @@ -412,7 +413,7 @@ TEST_F(RPCClientTestApi, CoroRequestCancels) { loop.spawn([&loop, &finished] { reindexer::client::CoroReindexer rx; auto err = rx.Connect(std::string("cproto://") + kDefaultRPCServerAddr + "/test_db", loop); - EXPECT_TRUE(err.ok()) << err.what(); + ASSERT_TRUE(err.ok()) << err.what(); { CancelRdxContext ctx; @@ -440,7 +441,7 @@ TEST_F(RPCClientTestApi, CoroRequestCancels) { loop.run(); ASSERT_TRUE(finished); Error err = 
StopServer(); - EXPECT_TRUE(err.ok()) << err.what(); + ASSERT_TRUE(err.ok()) << err.what(); } TEST_F(RPCClientTestApi, CoroSuccessfullRequestWithTimeout) { @@ -455,15 +456,15 @@ TEST_F(RPCClientTestApi, CoroSuccessfullRequestWithTimeout) { config.RequestTimeout = seconds(6); reindexer::client::CoroReindexer rx(config); auto err = rx.Connect(std::string("cproto://") + kDefaultRPCServerAddr + "/test_db", loop); - EXPECT_TRUE(err.ok()) << err.what(); + ASSERT_TRUE(err.ok()) << err.what(); err = rx.AddNamespace(reindexer::NamespaceDef("MyNamespace")); - EXPECT_TRUE(err.ok()) << err.what(); + ASSERT_TRUE(err.ok()) << err.what(); finished = true; }); loop.run(); ASSERT_TRUE(finished); Error err = StopServer(); - EXPECT_TRUE(err.ok()) << err.what(); + ASSERT_TRUE(err.ok()) << err.what(); } TEST_F(RPCClientTestApi, CoroErrorLoginResponse) { @@ -483,7 +484,7 @@ TEST_F(RPCClientTestApi, CoroErrorLoginResponse) { loop.run(); ASSERT_TRUE(finished); Error err = StopServer(); - EXPECT_TRUE(err.ok()) << err.what(); + ASSERT_TRUE(err.ok()) << err.what(); } TEST_F(RPCClientTestApi, CoroStatus) { @@ -504,7 +505,7 @@ TEST_F(RPCClientTestApi, CoroStatus) { err = rx.Status(); ASSERT_TRUE(err.ok()) << err.what(); err = StopServer(); - EXPECT_TRUE(err.ok()) << err.what(); + ASSERT_TRUE(err.ok()) << err.what(); loop.sleep(std::chrono::milliseconds(20)); // Allow reading coroutine to handle disconnect err = rx.Status(); ASSERT_EQ(err.code(), errNetwork) << err.what(); @@ -621,8 +622,7 @@ TEST_F(RPCClientTestApi, CoroUpserts) { for (auto& it : qr) { ASSERT_TRUE(it.Status().ok()) << it.Status().what(); } - err = rx.Stop(); - ASSERT_TRUE(err.ok()) << err.what(); + rx.Stop(); finished = true; }); @@ -713,8 +713,7 @@ TEST_F(RPCClientTestApi, ServerRestart) { ready = true; wg.wait(); - err = rx.Stop(); - ASSERT_TRUE(err.ok()) << err.what(); + rx.Stop(); finished = true; }); @@ -728,7 +727,7 @@ TEST_F(RPCClientTestApi, ServerRestart) { // Shutdown server step = Step::ShutdownInProgress; Error err = StopServer(); - EXPECT_TRUE(err.ok()) << err.what(); + ASSERT_TRUE(err.ok()) << err.what(); step = Step::ShutdownDone; std::this_thread::sleep_for(std::chrono::milliseconds(300)); @@ -861,8 +860,7 @@ TEST_F(RPCClientTestApi, CoroUpdatesFilteringByNs) { ASSERT_TRUE(reciever2.AwaitNamespaces(0)); ASSERT_TRUE(reciever3.AwaitNamespaces(0)); } - err = rx.Stop(); - ASSERT_TRUE(err.ok()) << err.what(); + rx.Stop(); err = rx.Status(); ASSERT_FALSE(err.ok()) << err.what(); @@ -964,8 +962,7 @@ TEST_F(RPCClientTestApi, FetchingWithJoin) { EXPECT_EQ(ser.Slice(), expected); i++; } - err = rx.Stop(); - ASSERT_TRUE(err.ok()) << err.what(); + rx.Stop(); }); loop.run(); @@ -1044,8 +1041,7 @@ TEST_F(RPCClientTestApi, AggregationsFetching) { } } - err = rx.Stop(); - ASSERT_TRUE(err.ok()) << err.what(); + rx.Stop(); }); loop.run(); diff --git a/cpp_src/gtests/tests/unit/selector_plan_test.cc b/cpp_src/gtests/tests/unit/selector_plan_test.cc index 5c4c09368..89a08333a 100644 --- a/cpp_src/gtests/tests/unit/selector_plan_test.cc +++ b/cpp_src/gtests/tests/unit/selector_plan_test.cc @@ -421,7 +421,7 @@ TEST_F(SelectorPlanTest, ConditionsMergeIntoEmptyCondition) { .Where("id", CondSet, {32, 33, 34}) .Where("id", CondEq, 310) .Where("id", CondSet, {35, 36, 37}) - .Where("value", CondAny, 0) + .Where("value", CondAny, {}) .Explain(); QueryResults qr; err = rt.reindexer->Select(q, qr); @@ -439,7 +439,7 @@ TEST_F(SelectorPlanTest, ConditionsMergeIntoEmptyCondition) { .Where("id", CondEq, 39) .Where("id", CondSet, {32, 39, 34}) .Where("id", CondSet, {}) - 
.Where("value", CondAny, 0) + .Where("value", CondAny, {}) .Explain(); QueryResults qr; err = rt.reindexer->Select(q, qr); @@ -454,7 +454,7 @@ TEST_F(SelectorPlanTest, ConditionsMergeIntoEmptyCondition) { { // Query with multiple empty sets const auto q = - Query(nsName).Where("id", CondEq, 45).Where("id", CondSet, {}).Where("id", CondSet, {}).Where("value", CondAny, 0).Explain(); + Query(nsName).Where("id", CondEq, 45).Where("id", CondSet, {}).Where("id", CondSet, {}).Where("value", CondAny, {}).Explain(); QueryResults qr; err = rt.reindexer->Select(q, qr); ASSERT_TRUE(err.ok()) << err.what(); diff --git a/cpp_src/gtests/tests/unit/tolal_lru_cache.cc b/cpp_src/gtests/tests/unit/tolal_lru_cache.cc index 970bf5a2c..64a1dd036 100644 --- a/cpp_src/gtests/tests/unit/tolal_lru_cache.cc +++ b/cpp_src/gtests/tests/unit/tolal_lru_cache.cc @@ -12,9 +12,9 @@ using reindexer::Query; using reindexer::WrSerializer; using reindexer::Serializer; -using reindexer::QueryTotalCountCache; +using reindexer::QueryCountCache; using reindexer::QueryCacheKey; -using reindexer::QueryTotalCountCacheVal; +using reindexer::QueryCountCacheVal; using reindexer::EqQueryCacheKey; TEST(LruCache, SimpleTest) { @@ -31,7 +31,7 @@ TEST(LruCache, SimpleTest) { qs.emplace_back(Query("namespace" + idx), false); } - QueryTotalCountCache cache; + QueryCountCache cache(reindexer::kDefaultCacheSizeLimit, reindexer::kDefaultHitCountToCache); auto keyComparator = EqQueryCacheKey(); PRINTF("checking query cache ...\n"); @@ -48,7 +48,7 @@ TEST(LruCache, SimpleTest) { ASSERT_TRUE(keyComparator(k, ckey)) << "queries are not EQUAL!\n"; } else { size_t total = static_cast(rand() % 1000); - cache.Put(ckey, QueryTotalCountCacheVal{total}); + cache.Put(ckey, QueryCountCacheVal{total}); qs[idx].second = true; } } @@ -69,7 +69,7 @@ TEST(LruCache, StressTest) { allocdebug_init_mt(); size_t memoryCheckpoint = get_alloc_size(); - QueryTotalCountCache cache(cacheSize); + QueryCountCache cache(cacheSize, reindexer::kDefaultHitCountToCache); PRINTF("preparing queries for caching ...\n"); for (auto i = 0; i < nsCount; i++) { @@ -94,7 +94,7 @@ TEST(LruCache, StressTest) { ASSERT_TRUE(EqQueryCacheKey()(qs[idx], ckey)) << "queries are not EQUAL!\n"; } else { size_t total = static_cast(rand() % 1000); - cache.Put(ckey, QueryTotalCountCacheVal{total}); + cache.Put(ckey, QueryCountCacheVal{total}); } } }); diff --git a/cpp_src/gtests/tests/unit/value_by_json_path.cc b/cpp_src/gtests/tests/unit/value_by_json_path.cc index 5d2797dbc..5bc6f8662 100644 --- a/cpp_src/gtests/tests/unit/value_by_json_path.cc +++ b/cpp_src/gtests/tests/unit/value_by_json_path.cc @@ -1,16 +1,15 @@ #include "core/cjson/jsonbuilder.h" #include "reindexer_api.h" -#include "tools/logger.h" TEST_F(ReindexerApi, GetValueByJsonPath) { Error err = rt.reindexer->OpenNamespace(default_namespace, StorageOpts().Enabled(false)); - EXPECT_TRUE(err.ok()) << err.what(); + ASSERT_TRUE(err.ok()) << err.what(); err = rt.reindexer->AddIndex(default_namespace, {"id", "hash", "string", IndexOpts().PK()}); - EXPECT_TRUE(err.ok()) << err.what(); + ASSERT_TRUE(err.ok()) << err.what(); err = rt.reindexer->Commit(default_namespace); - EXPECT_TRUE(err.ok()) << err.what(); + ASSERT_TRUE(err.ok()) << err.what(); struct Data { std::string id; @@ -26,7 +25,7 @@ TEST_F(ReindexerApi, GetValueByJsonPath) { for (int i = 0; i < 100; ++i) { Item item = rt.reindexer->NewItem(default_namespace); - EXPECT_TRUE(item.Status().ok()) << item.Status().what(); + ASSERT_TRUE(item.Status().ok()) << item.Status().what(); Data data = 
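// Note on the QueryCountCache changes above: the cache constructor now takes an explicit size
// limit and a hit-count threshold (reindexer::kDefaultHitCountToCache). The parameter name
// suggests an entry is only materialized after a key has been requested several times. A
// self-contained toy sketch of that gating idea (hypothetical types, not reindexer's cache API):
#include <cstddef>
#include <optional>
#include <string>
#include <unordered_map>

class HitCountGatedCache {
public:
	explicit HitCountGatedCache(size_t hitCountToCache) : hitCountToCache_(hitCountToCache) {}

	std::optional<size_t> Get(const std::string& key) {
		auto it = values_.find(key);
		if (it != values_.end()) return it->second;
		++hits_[key];  // remember the miss; frequently requested keys become cacheable
		return std::nullopt;
	}
	void Put(const std::string& key, size_t total) {
		// Only store values for keys that were requested often enough.
		if (hits_[key] >= hitCountToCache_) values_[key] = total;
	}

private:
	const size_t hitCountToCache_;
	std::unordered_map<std::string, size_t> hits_;
	std::unordered_map<std::string, size_t> values_;
};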
{"pk" + std::to_string(i), i + 1, "str" + std::to_string(i + 2), {{i + 3, i + 4, i + 5}}, i + 6, i + 7, i + 8}; char json[1024]; @@ -34,58 +33,58 @@ TEST_F(ReindexerApi, GetValueByJsonPath) { data.intArray[1], data.intArray[2], data.firstInner, data.secondInner, data.thirdInner); err = item.FromJSON(json); - EXPECT_TRUE(err.ok()) << err.what(); + ASSERT_TRUE(err.ok()) << err.what(); err = rt.reindexer->Upsert(default_namespace, item); - EXPECT_TRUE(err.ok()) << err.what(); + ASSERT_TRUE(err.ok()) << err.what(); err = rt.reindexer->Commit(default_namespace); - EXPECT_TRUE(err.ok()) << err.what(); + ASSERT_TRUE(err.ok()) << err.what(); VariantArray intField = item["inner.intField"]; - EXPECT_TRUE(intField.size() == 1); - EXPECT_TRUE(static_cast(intField[0]) == data.intField); + ASSERT_EQ(intField.size(), 1); + EXPECT_EQ(static_cast(intField[0]), data.intField); VariantArray stringField = item["inner.stringField"]; - EXPECT_TRUE(stringField.size() == 1); - EXPECT_TRUE(stringField[0].As().compare(data.stringField) == 0); + ASSERT_EQ(stringField.size(), 1); + EXPECT_EQ(stringField[0].As().compare(data.stringField), 0); VariantArray intArray = item["inner.inner2.intArray"]; - EXPECT_TRUE(intArray.size() == 3); + ASSERT_EQ(intArray.size(), data.intArray.size()); for (size_t j = 0; j < intArray.size(); ++j) { - EXPECT_TRUE(static_cast(intArray[j]) == data.intArray[j]); + EXPECT_EQ(static_cast(intArray[j]), data.intArray[j]); } VariantArray firstInner = item["inner.inner2.inner3.first"]; - EXPECT_TRUE(firstInner.size() == 1); - EXPECT_TRUE(static_cast(firstInner[0]) == data.firstInner); + ASSERT_EQ(firstInner.size(), 1); + EXPECT_EQ(static_cast(firstInner[0]), data.firstInner); VariantArray secondInner = item["inner.inner2.inner3.second"]; - EXPECT_TRUE(secondInner.size() == 1); - EXPECT_TRUE(static_cast(secondInner[0]) == data.secondInner); + ASSERT_EQ(secondInner.size(), 1); + EXPECT_EQ(static_cast(secondInner[0]), data.secondInner); VariantArray thirdInner = item["inner.inner2.inner3.third"]; - EXPECT_TRUE(thirdInner.size() == 1); - EXPECT_TRUE(static_cast(thirdInner[0]) == data.thirdInner); + ASSERT_EQ(thirdInner.size(), 1); + EXPECT_EQ(static_cast(thirdInner[0]), data.thirdInner); } } TEST_F(ReindexerApi, SelectByJsonPath) { Error err = rt.reindexer->OpenNamespace(default_namespace, StorageOpts().Enabled(false)); - EXPECT_TRUE(err.ok()) << err.what(); + ASSERT_TRUE(err.ok()) << err.what(); err = rt.reindexer->AddIndex(default_namespace, {"id", "hash", "string", IndexOpts().PK()}); - EXPECT_TRUE(err.ok()) << err.what(); + ASSERT_TRUE(err.ok()) << err.what(); err = rt.reindexer->Commit(default_namespace); - EXPECT_TRUE(err.ok()) << err.what(); + ASSERT_TRUE(err.ok()) << err.what(); const char jsonPattern[] = R"xxx({"id": "%s", "nested": {"string": "%s", "int": %d, "intarray" : [1,2,3]}})xxx"; std::vector properIntValues; for (int i = 0; i < 15; ++i) { Item item = rt.reindexer->NewItem(default_namespace); - EXPECT_TRUE(item.Status().ok()) << item.Status().what(); + ASSERT_TRUE(item.Status().ok()) << item.Status().what(); char json[512]; auto pk = "pk" + std::to_string(i); @@ -95,97 +94,97 @@ TEST_F(ReindexerApi, SelectByJsonPath) { if (i >= 5) properIntValues.push_back(i); err = item.FromJSON(json); - EXPECT_TRUE(err.ok()) << err.what(); + ASSERT_TRUE(err.ok()) << err.what(); err = rt.reindexer->Upsert(default_namespace, item); - EXPECT_TRUE(err.ok()) << err.what(); + ASSERT_TRUE(err.ok()) << err.what(); err = rt.reindexer->Commit(default_namespace); - EXPECT_TRUE(err.ok()) << err.what(); + 
ASSERT_TRUE(err.ok()) << err.what(); } QueryResults qr1; Variant strValueToFind("str_pk1"); Query query1{Query(default_namespace).Where("nested.string", CondEq, strValueToFind)}; err = rt.reindexer->Select(query1, qr1); - EXPECT_TRUE(err.ok()) << err.what(); - EXPECT_TRUE(qr1.Count() == 1); + ASSERT_TRUE(err.ok()) << err.what(); + ASSERT_EQ(qr1.Count(), 1); Item theOnlyItem = qr1[0].GetItem(false); VariantArray krefs = theOnlyItem["nested.string"]; - EXPECT_TRUE(krefs.size() == 1); - EXPECT_TRUE(krefs[0].As() == strValueToFind.As()); + ASSERT_EQ(krefs.size(), 1); + EXPECT_EQ(krefs[0].As(), strValueToFind.As()); QueryResults qr2; Variant intValueToFind(static_cast(5)); Query query2{Query(default_namespace).Where("nested.int", CondGe, intValueToFind)}; err = rt.reindexer->Select(query2, qr2); - EXPECT_TRUE(err.ok()) << err.what(); - EXPECT_TRUE(qr2.Count() == 10); + ASSERT_TRUE(err.ok()) << err.what(); + ASSERT_EQ(qr2.Count(), properIntValues.size()); - EXPECT_TRUE(properIntValues.size() == qr2.Count()); + ASSERT_EQ(properIntValues.size(), qr2.Count()); for (size_t i = 0; i < properIntValues.size(); ++i) { Item item = qr2[i].GetItem(false); VariantArray krefs = item["nested.int"]; - EXPECT_TRUE(krefs.size() == 1); - EXPECT_TRUE(static_cast(krefs[0]) == properIntValues[i]); + ASSERT_EQ(krefs.size(), 1); + EXPECT_EQ(static_cast(krefs[0]), properIntValues[i]); } QueryResults qr3; Variant arrayItemToFind(static_cast(2)); Query query3{Query(default_namespace).Where("nested.intarray", CondGe, arrayItemToFind)}; err = rt.reindexer->Select(query3, qr3); - EXPECT_TRUE(err.ok()) << err.what(); - EXPECT_TRUE(qr3.Count() == 15); + ASSERT_TRUE(err.ok()) << err.what(); + EXPECT_EQ(qr3.Count(), 15); } TEST_F(ReindexerApi, CompositeFTSelectByJsonPath) { Error err = rt.reindexer->OpenNamespace(default_namespace, StorageOpts().Enabled(false)); - EXPECT_TRUE(err.ok()) << err.what(); + ASSERT_TRUE(err.ok()) << err.what(); err = rt.reindexer->AddIndex(default_namespace, {"id", "hash", "string", IndexOpts().PK()}); - EXPECT_TRUE(err.ok()) << err.what(); + ASSERT_TRUE(err.ok()) << err.what(); err = rt.reindexer->AddIndex(default_namespace, {"locale", "hash", "string", IndexOpts()}); - EXPECT_TRUE(err.ok()) << err.what(); + ASSERT_TRUE(err.ok()) << err.what(); err = rt.reindexer->Commit(default_namespace); - EXPECT_TRUE(err.ok()) << err.what(); + ASSERT_TRUE(err.ok()) << err.what(); const char jsonPattern[] = R"xxx({"id": "key%d", "locale" : "%s", "nested": {"name": "name%d", "count": %ld}})xxx"; for (int i = 0; i < 20'000; ++i) { Item item = rt.reindexer->NewItem(default_namespace); - EXPECT_TRUE(item.Status().ok()) << item.Status().what(); + ASSERT_TRUE(item.Status().ok()) << item.Status().what(); char json[1024]; long count = i; snprintf(json, sizeof(json) - 1, jsonPattern, i, i % 2 ? 
"en" : "ru", i, count); err = item.Unsafe(true).FromJSON(json); - EXPECT_TRUE(err.ok()) << err.what(); + ASSERT_TRUE(err.ok()) << err.what(); err = rt.reindexer->Upsert(default_namespace, item); - EXPECT_TRUE(err.ok()) << err.what(); + ASSERT_TRUE(err.ok()) << err.what(); err = rt.reindexer->Commit(default_namespace); - EXPECT_TRUE(err.ok()) << err.what(); + ASSERT_TRUE(err.ok()) << err.what(); } err = rt.reindexer->AddIndex(default_namespace, {"composite_ft", {"nested.name", "id", "locale"}, "text", "composite", IndexOpts()}); - EXPECT_TRUE(err.ok()) << err.what(); + ASSERT_TRUE(err.ok()) << err.what(); err = rt.reindexer->Commit(default_namespace); - EXPECT_TRUE(err.ok()) << err.what(); + ASSERT_TRUE(err.ok()) << err.what(); QueryResults qr; Query query{Query(default_namespace).Where("composite_ft", CondEq, "name2")}; err = rt.reindexer->Select(query, qr); - EXPECT_TRUE(err.ok()) << err.what(); - EXPECT_TRUE(qr.Count() == 1); + ASSERT_TRUE(err.ok()) << err.what(); + EXPECT_EQ(qr.Count(), 1); for (auto it : qr) { Item ritem(it.GetItem(false)); auto json = ritem.GetJSON(); - EXPECT_TRUE(json == R"xxx({"id":"key2","locale":"ru","nested":{"name":"name2","count":2}})xxx"); + EXPECT_EQ(json, R"xxx({"id":"key2","locale":"ru","nested":{"name":"name2","count":2}})xxx"); } } diff --git a/cpp_src/gtests/tools.h b/cpp_src/gtests/tools.h index 65f3a90b9..7095193b9 100644 --- a/cpp_src/gtests/tools.h +++ b/cpp_src/gtests/tools.h @@ -27,3 +27,67 @@ inline std::string randStrUuid() { inline reindexer::Uuid randUuid() { return reindexer::Uuid{randStrUuid()}; } inline reindexer::Uuid nilUuid() { return reindexer::Uuid{nilUUID}; } + +template +inline reindexer::VariantArray randUuidArrayImpl(Fn fillFn, size_t min, size_t max) { + assert(min <= max); + reindexer::VariantArray ret; + const size_t count = min == max ? 
min : min + rand() % (max - min); + ret.reserve(count); + for (size_t i = 0; i < count; ++i) { + fillFn(ret); + } + return ret; +} + +inline reindexer::VariantArray randUuidArray(size_t min, size_t max) { + return randUuidArrayImpl([](auto& v) { v.emplace_back(randUuid()); }, min, max); +} + +inline reindexer::VariantArray randStrUuidArray(size_t min, size_t max) { + return randUuidArrayImpl([](auto& v) { v.emplace_back(randStrUuid()); }, min, max); +} + +inline reindexer::VariantArray randHeterogeneousUuidArray(size_t min, size_t max) { + return randUuidArrayImpl( + [](auto& v) { + if (rand() % 2) { + v.emplace_back(randStrUuid()); + } else { + v.emplace_back(randUuid()); + } + }, + min, max); +} + +inline auto minMaxArgs(CondType cond, size_t max) { + struct { + size_t min; + size_t max; + } res; + switch (cond) { + case CondEq: + case CondSet: + case CondAllSet: + res.min = 0; + res.max = max; + break; + case CondLike: + case CondLt: + case CondLe: + case CondGt: + case CondGe: + res.min = res.max = 1; + break; + case CondRange: + res.min = res.max = 2; + break; + case CondAny: + case CondEmpty: + res.min = res.max = 0; + break; + case CondDWithin: + assert(0); + } + return res; +} diff --git a/cpp_src/net/connectinstatscollector.cc b/cpp_src/net/connectinstatscollector.cc index da2803992..d60608122 100644 --- a/cpp_src/net/connectinstatscollector.cc +++ b/cpp_src/net/connectinstatscollector.cc @@ -26,27 +26,6 @@ void connection_stats_collector::restart() { void connection_stats_collector::stop() noexcept { stats_update_timer_.stop(); } -void connection_stats_collector::update_read_stats(ssize_t nread) noexcept { - stat_->recv_bytes.fetch_add(nread, std::memory_order_relaxed); - auto now = std::chrono::duration_cast(std::chrono::system_clock::now().time_since_epoch()); - stat_->last_recv_ts.store(now.count(), std::memory_order_relaxed); -} - -void connection_stats_collector::update_write_stats(ssize_t written, size_t send_buf_size) noexcept { - stat_->sent_bytes.fetch_add(written, std::memory_order_relaxed); - auto now = std::chrono::duration_cast(std::chrono::system_clock::now().time_since_epoch()); - stat_->last_send_ts.store(now.count(), std::memory_order_relaxed); - stat_->send_buf_bytes.store(send_buf_size, std::memory_order_relaxed); -} - -void connection_stats_collector::update_pended_updates(size_t count) noexcept { - stat_->pended_updates.store(count, std::memory_order_relaxed); -} - -void connection_stats_collector::update_send_buf_size(size_t size) noexcept { - stat_->send_buf_bytes.store(size, std::memory_order_relaxed); -} - void connection_stats_collector::stats_check_cb(ev::periodic&, int) noexcept { assertrx(stat_); const uint64_t kAvgPeriod = 10; diff --git a/cpp_src/net/connectinstatscollector.h b/cpp_src/net/connectinstatscollector.h index 63c356ed4..5cddb8cd1 100644 --- a/cpp_src/net/connectinstatscollector.h +++ b/cpp_src/net/connectinstatscollector.h @@ -35,10 +35,19 @@ class connection_stats_collector { void detach() noexcept; void restart(); void stop() noexcept; - void update_read_stats(ssize_t nread) noexcept; - void update_write_stats(ssize_t written, size_t send_buf_size) noexcept; - void update_pended_updates(size_t) noexcept; - void update_send_buf_size(size_t) noexcept; + void update_read_stats(ssize_t nread) noexcept { + stat_->recv_bytes.fetch_add(nread, std::memory_order_relaxed); + auto now = std::chrono::duration_cast(std::chrono::system_clock::now().time_since_epoch()); + stat_->last_recv_ts.store(now.count(), std::memory_order_relaxed); + } + void 
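// Note on the helpers added to cpp_src/gtests/tools.h above: minMaxArgs() encodes how many
// values each condition expects (exactly 2 for CondRange, 0 for CondAny/CondEmpty, a single
// value for the comparison conditions, up to 'max' for the set-like conditions), and
// randUuidArray() produces that many random UUID variants. A small sketch combining them
// (assumes the same reindexer test headers as tools.h; the wrapper name is hypothetical):

// Build a random argument list of the right arity for 'cond'. CondDWithin is not supported
// by minMaxArgs() and must not be passed here.
inline reindexer::VariantArray randomUuidArgsFor(CondType cond) {
	const auto bounds = minMaxArgs(cond, /*max=*/5);
	return randUuidArray(bounds.min, bounds.max);
}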
update_write_stats(ssize_t written, size_t send_buf_size) noexcept { + stat_->sent_bytes.fetch_add(written, std::memory_order_relaxed); + auto now = std::chrono::duration_cast(std::chrono::system_clock::now().time_since_epoch()); + stat_->last_send_ts.store(now.count(), std::memory_order_relaxed); + stat_->send_buf_bytes.store(send_buf_size, std::memory_order_relaxed); + } + void update_pended_updates(size_t count) noexcept { stat_->pended_updates.store(count, std::memory_order_relaxed); } + void update_send_buf_size(size_t size) noexcept { stat_->send_buf_bytes.store(size, std::memory_order_relaxed); } protected: void stats_check_cb(ev::periodic &watcher, int) noexcept; diff --git a/cpp_src/net/connection.cc b/cpp_src/net/connection.cc index 6187fba4a..14ac8c8cd 100644 --- a/cpp_src/net/connection.cc +++ b/cpp_src/net/connection.cc @@ -5,28 +5,34 @@ namespace reindexer { namespace net { template -Connection::Connection(int fd, ev::dynamic_loop &loop, bool enableStat, size_t readBufSize, size_t writeBufSize) - : sock_(fd), curEvents_(0), wrBuf_(writeBufSize), rdBuf_(readBufSize), stats_(enableStat ? new connection_stats_collector : nullptr) { +Connection::Connection(socket &&s, ev::dynamic_loop &loop, bool enableStat, size_t readBufSize, size_t writeBufSize, int idleTimeout) + : sock_(std::move(s)), + curEvents_(0), + wrBuf_(writeBufSize), + rdBuf_(readBufSize), + stats_(enableStat ? new connection_stats_collector : nullptr), + kIdleCheckPeriod_(idleTimeout) { attach(loop); + restartIdleCheckTimer(); } template Connection::~Connection() { - if (sock_.valid()) { - io_.stop(); - sock_.close(); - } + io_.stop(); } template -void Connection::restart(int fd) { +void Connection::restart(socket &&s) { assertrx(!sock_.valid()); - sock_ = fd; + sock_ = std::move(s); wrBuf_.clear(); rdBuf_.clear(); curEvents_ = 0; closeConn_ = false; - if (stats_) stats_->restart(); + if (stats_) { + stats_->restart(); + } + restartIdleCheckTimer(); } template @@ -42,7 +48,10 @@ void Connection::attach(ev::dynamic_loop &loop) { timeout_.set(loop); async_.set(this); async_.set(loop); - if (stats_) stats_->attach(loop); + if (stats_) { + stats_->attach(loop); + } + restartIdleCheckTimer(); attached_ = true; } @@ -55,7 +64,9 @@ void Connection::detach() { timeout_.reset(); async_.stop(); async_.reset(); - if (stats_) stats_->detach(); + if (stats_) { + stats_->detach(); + } attached_ = false; } @@ -64,7 +75,9 @@ void Connection::closeConn() { io_.loop.break_loop(); if (sock_.valid()) { io_.stop(); - sock_.close(); + if rx_unlikely (sock_.close() != 0) { + perror("sock_.close() error"); + } } timeout_.stop(); async_.stop(); @@ -77,6 +90,7 @@ void Connection::closeConn() { template void Connection::callback(ev::io & /*watcher*/, int revents) { if (ev::ERROR & revents) return; + ++rwCounter_; if (revents & ev::READ) { const auto res = read_cb(); @@ -147,7 +161,9 @@ typename Connection::ReadResT Connection::read_cb() { closeConn(); return ReadResT::Default; } else if (nread > 0) { - if (stats_) stats_->update_read_stats(nread); + if (stats_) { + stats_->update_read_stats(nread); + } rdBuf_.advance_head(nread); if (!closeConn_) { if (onRead() == ReadResT::Rebalanced) { @@ -160,15 +176,17 @@ typename Connection::ReadResT Connection::read_cb() { return ReadResT::Default; } template -void Connection::timeout_cb(ev::periodic &watcher, int /*time*/) { +void Connection::timeout_cb(ev::periodic & /*watcher*/, int /*time*/) { + const bool isActive = lastCheckRWCounter_ != rwCounter_; + lastCheckRWCounter_ = rwCounter_; + if 
(isActive) { + return; + } if (sock_.has_pending_data()) { fprintf(stdout, "Connection got idle timeout, but socket has pending data. Do not dropping the connection\nThis probably means, that " "there are some very long queries in some of the connections, which may affect the other connections. Consider to use " "dedicated threads for them\n"); - if (!watcher.has_period()) { - watcher.start(watcher.last_delay()); - } return; } @@ -180,7 +198,8 @@ void Connection::async_cb(ev::async &) { callback(io_, ev::WRITE); } -template class Connection; template class Connection; +template class Connection; + } // namespace net } // namespace reindexer diff --git a/cpp_src/net/connection.h b/cpp_src/net/connection.h index f7365c8b5..67751b589 100644 --- a/cpp_src/net/connection.h +++ b/cpp_src/net/connection.h @@ -35,8 +35,8 @@ using reindexer::cbuf; template class Connection { public: - Connection(int fd, ev::dynamic_loop &loop, bool enableStat, size_t readBufSize = kConnReadbufSize, - size_t writeBufSize = kConnWriteBufSize); + Connection(socket &&s, ev::dynamic_loop &loop, bool enableStat, size_t readBufSize = kConnReadbufSize, + size_t writeBufSize = kConnWriteBufSize, int idleTimeout = -1); virtual ~Connection(); protected: @@ -56,28 +56,41 @@ class Connection { void closeConn(); void attach(ev::dynamic_loop &loop); void detach(); - void restart(int fd); + void restart(socket &&s); ssize_t async_read(); + socket sock_; ev::io io_; - ev::timer timeout_; ev::async async_; - socket sock_; int curEvents_ = 0; bool closeConn_ = false; bool attached_ = false; bool canWrite_ = true; + int64_t rwCounter_ = 0; + int64_t lastCheckRWCounter_ = 0; chain_buf wrBuf_; cbuf rdBuf_; std::string clientAddr_; std::unique_ptr stats_; + +private: + void restartIdleCheckTimer() noexcept { + lastCheckRWCounter_ = rwCounter_ = 0; + if (kIdleCheckPeriod_ > 0) { + timeout_.start(kIdleCheckPeriod_, kIdleCheckPeriod_); + } + } + + ev::timer timeout_; + const int kIdleCheckPeriod_; }; using ConnectionST = Connection; using ConnectionMT = Connection; + } // namespace net } // namespace reindexer diff --git a/cpp_src/net/cproto/clientconnection.cc b/cpp_src/net/cproto/clientconnection.cc index 929cd73b1..c8664f646 100644 --- a/cpp_src/net/cproto/clientconnection.cc +++ b/cpp_src/net/cproto/clientconnection.cc @@ -20,7 +20,7 @@ bool ClientConnection::ConnectData::CurrDsnFailed(int failedDsnIdx) const { retu int ClientConnection::ConnectData::GetNextDsnIndex() const { return (validEntryIdx.load(std::memory_order_acquire) + 1) % entries.size(); } ClientConnection::ClientConnection(ev::dynamic_loop &loop, ConnectData *connectData, ConnectionFailCallback connectionFailCallback) - : ConnectionMT(-1, loop, false), + : ConnectionMT(socket(), loop, false), state_(ConnInit), completions_(kMaxCompletions), seq_(0), @@ -86,7 +86,11 @@ void ClientConnection::connectInternal() noexcept { closeConn(); } }; - sock_.connect((connectEntry.uri.hostname() + ":" + port)); + if (sock_.connect((connectEntry.uri.hostname() + ':' + port), socket_domain::tcp) != 0) { + if rx_unlikely (!sock_.would_block(sock_.last_error())) { + perror("sock_.connect() error"); + } + } if (!sock_.valid()) { completion(RPCAnswer(Error(errNetwork, "Socket connect error: %d", sock_.last_error())), this); } else { diff --git a/cpp_src/net/cproto/coroclientconnection.cc b/cpp_src/net/cproto/coroclientconnection.cc index 6ef1ea903..a9f842203 100644 --- a/cpp_src/net/cproto/coroclientconnection.cc +++ b/cpp_src/net/cproto/coroclientconnection.cc @@ -6,6 +6,7 @@ #include 
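// Note on the idle-timeout rework above: every read/write callback bumps rwCounter_, and the
// periodic timeout_cb() only treats the connection as idle when the counter has not moved
// since the previous check and the socket has no pending data. A standalone sketch of that
// pattern (hypothetical names, not the Connection class itself):
#include <cstdint>

struct IdleChecker {
	int64_t rwCounter = 0;        // incremented from the I/O event callback
	int64_t lastSeenCounter = 0;  // snapshot taken by the previous periodic check

	void onIoEvent() noexcept { ++rwCounter; }

	// Returns true when the connection should be dropped as idle.
	bool onPeriodicCheck(bool socketHasPendingData) noexcept {
		const bool wasActive = (lastSeenCounter != rwCounter);
		lastSeenCounter = rwCounter;
		if (wasActive) return false;             // traffic was seen since the last check
		if (socketHasPendingData) return false;  // a long-running request is still in progress
		return true;                             // genuinely idle
	}
};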
"reindexer_version.h" #include "server/rpcqrwatcher.h" #include "tools/serializer.h" +#include "tools/stringstools.h" #include @@ -32,7 +33,7 @@ CoroClientConnection::CoroClientConnection() wrCh_(kWrChannelSize), seqNums_(kMaxParallelRPCCalls), updatesCh_(kUpdatesChannelSize), - conn_(-1, kReadBufReserveSize, false) { + conn_(kReadBufReserveSize, false) { recycledChuncks_.reserve(kMaxRecycledChuncks); errSyncCh_.close(); seqNums_.close(); @@ -231,14 +232,28 @@ Error CoroClientConnection::login(std::vector &buf) { if (conn_.state() == manual_connection::conn_state::init) { readWg_.wait(); lastError_ = errOK; - std::string port = connectData_.uri.port().length() ? connectData_.uri.port() : std::string("6534"); - int ret = conn_.async_connect(connectData_.uri.hostname() + ":" + port); + int ret = 0; + std::string dbName; + if (connectData_.uri.scheme() == "cproto") { + dbName = connectData_.uri.path(); + std::string port = connectData_.uri.port().length() ? connectData_.uri.port() : std::string("6534"); + ret = conn_.async_connect(connectData_.uri.hostname() + ":" + port, socket_domain::tcp); + } else { + std::vector pathParts; + split(std::string_view(connectData_.uri.path()), ":", true, pathParts); + if (pathParts.size() >= 2) { + dbName = pathParts.back(); + ret = conn_.async_connect(connectData_.uri.path().substr(0, connectData_.uri.path().size() - dbName.size() - 1), + socket_domain::unx); + } else { + ret = conn_.async_connect(connectData_.uri.path(), socket_domain::unx); + } + } if (ret < 0) { // unable to connect return Error(errNetwork, "Connect error"); } - std::string dbName = connectData_.uri.path(); std::string userName = connectData_.uri.username(); std::string password = connectData_.uri.password(); if (dbName[0] == '/') dbName = dbName.substr(1); diff --git a/cpp_src/net/cproto/dispatcher.cc b/cpp_src/net/cproto/dispatcher.cc deleted file mode 100644 index efa3f6d23..000000000 --- a/cpp_src/net/cproto/dispatcher.cc +++ /dev/null @@ -1,30 +0,0 @@ -#include "dispatcher.h" -#include -#include -#include "debug/allocdebug.h" -#include "tools/fsops.h" -#include "tools/stringstools.h" - -namespace reindexer { -namespace net { -namespace cproto { - -Error Dispatcher::handle(Context &ctx) { - if (uint32_t(ctx.call->cmd) < uint32_t(handlers_.size())) { - for (auto &middleware : middlewares_) { - auto ret = middleware(ctx); - if (!ret.ok()) { - return ret; - } - } - auto handler = handlers_[ctx.call->cmd]; - if (handler) { - return handler(ctx); - } - } - return Error(errParams, "Invalid RPC call. CmdCode %08X\n", int(ctx.call->cmd)); -} - -} // namespace cproto -} // namespace net -} // namespace reindexer diff --git a/cpp_src/net/cproto/dispatcher.h b/cpp_src/net/cproto/dispatcher.h index 703fe9dad..7d3bd0144 100644 --- a/cpp_src/net/cproto/dispatcher.h +++ b/cpp_src/net/cproto/dispatcher.h @@ -60,20 +60,13 @@ struct Context { bool respSent; }; -class ServerConnection; - /// Reindexer cproto RPC dispatcher implementation. class Dispatcher { - friend class ServerConnection; - public: - Dispatcher() : handlers_(kCmdCodeMax, nullptr) {} - /// Add handler for command. /// @param cmd - Command code /// @param object - handler class object /// @param func - handler - /// @param hasOptionalArgs - has to be true if func has optional args template void Register(CmdCode cmd, K *object, Error (K::*func)(Context &, Args... 
args)) { handlers_[cmd] = FuncWrapper{object, func}; @@ -111,9 +104,35 @@ class Dispatcher { onResponse_ = [=](Context &ctx) { (static_cast(object)->*func)(ctx); }; } -protected: - Error handle(Context &ctx); + /// Get reference to the current logger functor + /// @return Log handler reference + const std::function &LoggerRef() const noexcept { return logger_; } + /// Get reference to the current OnClose() functor + /// @return OnClose callback reference + const std::function &OnCloseRef() const noexcept { return onClose_; } + /// Get reference to the current OnResponse() functor + /// @return OnResponse callback reference + const std::function &OnResponseRef() const noexcept { return onResponse_; } + + /// Handle RPC fron the context + /// @param ctx - RPC context + Error Handle(Context &ctx) { + if rx_likely (uint32_t(ctx.call->cmd) < uint32_t(handlers_.size())) { + for (auto &middleware : middlewares_) { + auto ret = middleware(ctx); + if (!ret.ok()) { + return ret; + } + } + auto handler = handlers_[ctx.call->cmd]; + if rx_likely (handler) { + return handler(ctx); + } + } + return Error(errParams, "Invalid RPC call. CmdCode %08X\n", int(ctx.call->cmd)); + } +private: template struct is_optional : public std::false_type {}; @@ -149,7 +168,7 @@ class Dispatcher { using Handler = std::function; - std::vector handlers_; + std::array handlers_; std::vector middlewares_; std::function logger_; diff --git a/cpp_src/net/cproto/serverconnection.cc b/cpp_src/net/cproto/serverconnection.cc index d6cd24ad9..fc7c28da3 100644 --- a/cpp_src/net/cproto/serverconnection.cc +++ b/cpp_src/net/cproto/serverconnection.cc @@ -1,5 +1,3 @@ - - #include "serverconnection.h" #include #include @@ -10,19 +8,18 @@ namespace reindexer { namespace net { namespace cproto { -const auto kCProtoTimeoutSec = 300.; +const auto kCProtoTimeoutSec = 300; const auto kUpdatesResendTimeout = 0.1; const auto kMaxUpdatesBufSize = 1024 * 1024 * 8; -ServerConnection::ServerConnection(int fd, ev::dynamic_loop &loop, Dispatcher &dispatcher, bool enableStat, size_t maxUpdatesSize, +ServerConnection::ServerConnection(socket &&s, ev::dynamic_loop &loop, Dispatcher &dispatcher, bool enableStat, size_t maxUpdatesSize, bool enableCustomBalancing) - : net::ConnectionST(fd, loop, enableStat), + : ConnectionST(std::move(s), loop, enableStat, kConnReadbufSize, kConnWriteBufSize, kCProtoTimeoutSec), dispatcher_(dispatcher), updatesSize_(0), updateLostFlag_(false), maxUpdatesSize_(maxUpdatesSize), balancingType_(enableCustomBalancing ? 
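// Note on the Dispatcher changes above: handle() moved into the header as Handle(), the
// handler table became a fixed-size array indexed by command code, and middlewares run before
// the handler. A standalone sketch of that dispatch shape (generic types, not reindexer's
// Dispatcher):
#include <array>
#include <cstdint>
#include <functional>
#include <string>
#include <vector>

struct MiniCtx { uint32_t cmd; };
using MiniHandler = std::function<std::string(MiniCtx&)>;
using MiniMiddleware = std::function<bool(MiniCtx&)>;  // returning false aborts the call

constexpr size_t kMaxCmd = 128;

class MiniDispatcher {
public:
	void Register(uint32_t cmd, MiniHandler h) {
		if (cmd < handlers_.size()) handlers_[cmd] = std::move(h);
	}
	void AddMiddleware(MiniMiddleware m) { middlewares_.push_back(std::move(m)); }

	std::string Handle(MiniCtx& ctx) {
		if (ctx.cmd < handlers_.size()) {
			for (auto& mw : middlewares_) {
				if (!mw(ctx)) return "rejected by middleware";
			}
			if (auto& h = handlers_[ctx.cmd]) return h(ctx);
		}
		return "invalid command code";
	}

private:
	std::array<MiniHandler, kMaxCmd> handlers_;  // default-constructed (empty) handlers
	std::vector<MiniMiddleware> middlewares_;
};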
BalancingType::NotSet : BalancingType::None) { - timeout_.start(kCProtoTimeoutSec); updates_async_.set(this); updates_timeout_.set(this); updates_async_.set(loop); @@ -31,24 +28,22 @@ ServerConnection::ServerConnection(int fd, ev::dynamic_loop &loop, Dispatcher &d updates_timeout_.start(kUpdatesResendTimeout, kUpdatesResendTimeout); updates_async_.start(); - callback(io_, ev::READ); + BaseConnT::callback(BaseConnT::io_, ev::READ); } -ServerConnection::~ServerConnection() { closeConn(); } +ServerConnection::~ServerConnection() { BaseConnT::closeConn(); } -bool ServerConnection::Restart(int fd) { - restart(fd); - timeout_.start(kCProtoTimeoutSec); +bool ServerConnection::Restart(socket &&s) { + BaseConnT::restart(std::move(s)); updates_async_.start(); - callback(io_, ev::READ); + BaseConnT::callback(BaseConnT::io_, ev::READ); return true; } void ServerConnection::Attach(ev::dynamic_loop &loop) { - async_.set(this); - if (!attached_) { - attach(loop); - timeout_.start(kCProtoTimeoutSec); + BaseConnT::async_.set(this); + if (!BaseConnT::attached_) { + BaseConnT::attach(loop); updates_async_.set(loop); updates_async_.start(); updates_timeout_.set(loop); @@ -57,8 +52,8 @@ void ServerConnection::Attach(ev::dynamic_loop &loop) { } void ServerConnection::Detach() { - if (attached_) { - detach(); + if (BaseConnT::attached_) { + BaseConnT::detach(); updates_async_.stop(); updates_async_.reset(); updates_timeout_.stop(); @@ -67,9 +62,9 @@ void ServerConnection::Detach() { } void ServerConnection::onClose() { - if (dispatcher_.onClose_) { + if (dispatcher_.OnCloseRef()) { Context ctx{"", nullptr, this, {{}, {}}, false}; - dispatcher_.onClose_(ctx, errOK); + dispatcher_.OnCloseRef()(ctx, errOK); } clientData_.reset(); balancingType_ = BalancingType::NotSet; @@ -77,26 +72,30 @@ void ServerConnection::onClose() { std::unique_lock lck(updates_mtx_); updates_.clear(); updatesSize_ = 0; - if (ConnectionST::stats_) ConnectionST::stats_->update_pended_updates(0); + if (BaseConnT::stats_) { + BaseConnT::stats_->update_pended_updates(0); + } } void ServerConnection::handleRPC(Context &ctx) { - Error err = dispatcher_.handle(ctx); + Error err = dispatcher_.Handle(ctx); if (!ctx.respSent) { responceRPC(ctx, err, Args()); } } -ServerConnection::ReadResT ServerConnection::onRead() { +ServerConnection::BaseConnT::ReadResT ServerConnection::onRead() { CProtoHeader hdr; - while (!closeConn_) { - Context ctx{clientAddr_, nullptr, this, {{}, {}}, false}; + while (!BaseConnT::closeConn_) { + Context ctx{BaseConnT::clientAddr_, nullptr, this, {{}, {}}, false}; std::string uncompressed; - auto len = rdBuf_.peek(reinterpret_cast(&hdr), sizeof(hdr)); - if (len < sizeof(hdr)) return ReadResT::Default; + auto len = BaseConnT::rdBuf_.peek(reinterpret_cast(&hdr), sizeof(hdr)); + if (len < sizeof(hdr)) { + return BaseConnT::ReadResT::Default; + } if (hdr.magic != kCprotoMagic) { try { @@ -104,8 +103,8 @@ ServerConnection::ReadResT ServerConnection::onRead() { } catch (const Error &err) { fprintf(stderr, "responceRPC unexpected error: %s\n", err.what().c_str()); } - closeConn_ = true; - return ReadResT::Default; + BaseConnT::closeConn_ = true; + return BaseConnT::ReadResT::Default; } if (hdr.version < kCprotoMinCompatVersion) { @@ -117,44 +116,43 @@ ServerConnection::ReadResT ServerConnection::onRead() { } catch (const Error &err) { fprintf(stderr, "responceRPC unexpected error: %s\n", err.what().c_str()); } - closeConn_ = true; - return ReadResT::Default; + BaseConnT::closeConn_ = true; + return BaseConnT::ReadResT::Default; } // 
Enable compression, only if clients sand compressed data to us enableSnappy_ = (hdr.version >= kCprotoMinSnappyVersion) && hdr.compressed; // Rebalance connection, when first message was recieved - if (balancingType_ == BalancingType::NotSet) { - if (hdr.dedicatedThread && hdr.version >= kCprotoMinDedicatedThreadsVersion) { - balancingType_ = BalancingType::Dedicated; - } else { - balancingType_ = BalancingType::Shared; - } + if rx_unlikely (balancingType_ == BalancingType::NotSet) { + balancingType_ = (hdr.dedicatedThread && hdr.version >= kCprotoMinDedicatedThreadsVersion) ? BalancingType::Dedicated + : BalancingType::Shared; hasPendingData_ = true; if (rebalance_) { rebalance_(this, balancingType_); // After rebalancing this connection will probably be handled in another thread. Any code here after rebalance_() may lead // to data race - return ReadResT::Rebalanced; + return BaseConnT::ReadResT::Rebalanced; } - return ReadResT::Default; + return BaseConnT::ReadResT::Default; } - if (size_t(hdr.len) + sizeof(hdr) > rdBuf_.capacity()) { - rdBuf_.reserve(size_t(hdr.len) + sizeof(hdr) + 0x1000); + if (size_t(hdr.len) + sizeof(hdr) > BaseConnT::rdBuf_.capacity()) { + BaseConnT::rdBuf_.reserve(size_t(hdr.len) + sizeof(hdr) + 0x1000); } - if (size_t(hdr.len) + sizeof(hdr) > rdBuf_.size()) { - if (!rdBuf_.size()) rdBuf_.clear(); - return ReadResT::Default; + if (size_t(hdr.len) + sizeof(hdr) > BaseConnT::rdBuf_.size()) { + if (!BaseConnT::rdBuf_.size()) { + BaseConnT::rdBuf_.clear(); + } + return BaseConnT::ReadResT::Default; } - rdBuf_.erase(sizeof(hdr)); + BaseConnT::rdBuf_.erase(sizeof(hdr)); - auto it = rdBuf_.tail(); + auto it = BaseConnT::rdBuf_.tail(); if (it.size() < size_t(hdr.len)) { - rdBuf_.unroll(); - it = rdBuf_.tail(); + BaseConnT::rdBuf_.unroll(); + it = BaseConnT::rdBuf_.tail(); } assertrx(it.size() >= size_t(hdr.len)); @@ -165,7 +163,7 @@ ServerConnection::ReadResT ServerConnection::onRead() { ctx.call->seq = hdr.seq; Serializer ser(it.data(), hdr.len); if (hdr.compressed) { - if (!snappy::Uncompress(it.data(), hdr.len, &uncompressed)) { + if rx_unlikely (!snappy::Uncompress(it.data(), hdr.len, &uncompressed)) { throw Error(errParseBin, "Can't decompress data from peer"); } @@ -189,14 +187,14 @@ ServerConnection::ReadResT ServerConnection::onRead() { } catch (const std::exception &err) { handleException(ctx, Error(errLogic, err.what())); } catch (...) 
{ - handleException(ctx, Error(errLogic, "Unknow exception")); + handleException(ctx, Error(errLogic, "Unknown exception")); } - rdBuf_.erase(hdr.len); - timeout_.start(kCProtoTimeoutSec); + BaseConnT::rdBuf_.erase(hdr.len); } - return ReadResT::Default; + return BaseConnT::ReadResT::Default; } + static void packRPC(WrSerializer &ser, Context &ctx, const Error &status, const Args &args, bool enableSnappy) { CProtoHeader hdr; hdr.len = 0; @@ -239,19 +237,21 @@ static chunk packRPC(chunk chunk, Context &ctx, const Error &status, const Args } void ServerConnection::responceRPC(Context &ctx, const Error &status, const Args &args) { - if (ctx.respSent) { + if rx_unlikely (ctx.respSent) { fprintf(stderr, "Warning - RPC responce already sent\n"); return; } - auto &&chunk = packRPC(wrBuf_.get_chunk(), ctx, status, args, enableSnappy_); + auto &&chunk = packRPC(BaseConnT::wrBuf_.get_chunk(), ctx, status, args, enableSnappy_); auto len = chunk.len_; - wrBuf_.write(std::move(chunk)); - if (ConnectionST::stats_) ConnectionST::stats_->update_send_buf_size(wrBuf_.data_size()); + BaseConnT::wrBuf_.write(std::move(chunk)); + if (BaseConnT::stats_) { + BaseConnT::stats_->update_send_buf_size(BaseConnT::wrBuf_.data_size()); + } - if (dispatcher_.onResponse_) { + if (dispatcher_.OnResponseRef()) { ctx.stat.sizeStat.respSizeBytes = len; - dispatcher_.onResponse_(ctx); + dispatcher_.OnResponseRef()(ctx); } ctx.respSent = true; @@ -259,8 +259,8 @@ void ServerConnection::responceRPC(Context &ctx, const Error &status, const Args // write_cb(); // } - if (dispatcher_.logger_ != nullptr) { - dispatcher_.logger_(ctx, status, args); + if (dispatcher_.LoggerRef()) { + dispatcher_.LoggerRef()(ctx, status, args); } } @@ -292,21 +292,21 @@ void ServerConnection::CallRPC(const IRPCCall &call) { updates_.emplace_back(std::move(callLost)); updateLostFlag_ = true; - if (ConnectionST::stats_) { - if (auto stat = ConnectionST::stats_->get_stat(); stat) { + if (BaseConnT::stats_) { + if (auto stat = BaseConnT::stats_->get_stat(); stat) { stat->updates_lost.fetch_add(1, std::memory_order_relaxed); stat->pended_updates.store(1, std::memory_order_relaxed); } } - } else if (ConnectionST::stats_) { - if (auto stat = ConnectionST::stats_->get_stat(); stat) { + } else if (BaseConnT::stats_) { + if (auto stat = BaseConnT::stats_->get_stat(); stat) { stat->pended_updates.store(updates_.size(), std::memory_order_relaxed); } } } void ServerConnection::sendUpdates() { - if (wrBuf_.size() + 10 > wrBuf_.capacity() || wrBuf_.data_size() > kMaxUpdatesBufSize / 2) { + if (BaseConnT::wrBuf_.size() + 10 > BaseConnT::wrBuf_.capacity() || BaseConnT::wrBuf_.data_size() > kMaxUpdatesBufSize / 2) { return; } @@ -329,7 +329,7 @@ void ServerConnection::sendUpdates() { size_t len = 0; Args args; CmdCode cmd; - WrSerializer ser(wrBuf_.get_chunk()); + WrSerializer ser(BaseConnT::wrBuf_.get_chunk()); size_t cnt = 0; size_t updatesSizeBuffered = 0; for (cnt = 0; cnt < updates.size() && ser.Len() < kMaxUpdatesBufSize; ++cnt) { @@ -343,7 +343,7 @@ void ServerConnection::sendUpdates() { len = ser.Len(); try { - wrBuf_.write(ser.DetachChunk()); + BaseConnT::wrBuf_.write(ser.DetachChunk()); } catch (...) 
{ RPCCall callLost{kCmdUpdates, 0, {}, milliseconds(0)}; cproto::Context ctxLost{"", &callLost, this, {{}, {}}, false}; @@ -359,15 +359,15 @@ void ServerConnection::sendUpdates() { packRPC(ser, ctxLost, Error(), {Arg(std::string(""))}, enableSnappy_); len = ser.Len(); wrBuf_.write(ser.DetachChunk()); - if (ConnectionST::stats_) { - ConnectionST::stats_->update_send_buf_size(wrBuf_.data_size()); - ConnectionST::stats_->update_pended_updates(0); + if (BaseConnT::stats_) { + BaseConnT::stats_->update_send_buf_size(wrBuf_.data_size()); + BaseConnT::stats_->update_pended_updates(0); } } - if (dispatcher_.onResponse_) { + if (dispatcher_.OnResponseRef()) { ctx.stat.sizeStat.respSizeBytes = len; - dispatcher_.onResponse_(ctxLost); + dispatcher_.OnResponseRef()(ctxLost); } callback(io_, ev::WRITE); @@ -381,25 +381,27 @@ void ServerConnection::sendUpdates() { updatesSize_ += updatesSizeCopy - updatesSizeBuffered; } - if (ConnectionST::stats_) stats_->update_pended_updates(updates.size()); - } else if (ConnectionST::stats_) { - if (auto stat = ConnectionST::stats_->get_stat(); stat) { + if (BaseConnT::stats_) stats_->update_pended_updates(updates.size()); + } else if (BaseConnT::stats_) { + if (auto stat = BaseConnT::stats_->get_stat(); stat) { std::lock_guard lck(updates_mtx_); stat->pended_updates.store(updates_.size(), std::memory_order_relaxed); } } - if (ConnectionST::stats_) ConnectionST::stats_->update_send_buf_size(wrBuf_.data_size()); + if (BaseConnT::stats_) { + BaseConnT::stats_->update_send_buf_size(BaseConnT::wrBuf_.data_size()); + } - if (dispatcher_.onResponse_) { + if (dispatcher_.OnResponseRef()) { ctx.stat.sizeStat.respSizeBytes = len; - dispatcher_.onResponse_(ctx); + dispatcher_.OnResponseRef()(ctx); } - callback(io_, ev::WRITE); + BaseConnT::callback(BaseConnT::io_, ev::WRITE); } -void ServerConnection::handleException(Context &ctx, const Error &err) { +void ServerConnection::handleException(Context &ctx, const Error &err) noexcept { // Exception occurs on unrecoverable error. Send responce, and drop connection fprintf(stderr, "Dropping RPC-connection. Reason: %s\n", err.what().c_str()); try { @@ -413,7 +415,7 @@ void ServerConnection::handleException(Context &ctx, const Error &err) { } catch (...) 
{ fprintf(stderr, "responceRPC unexpected error (unknow exception)\n"); } - closeConn_ = true; + BaseConnT::closeConn_ = true; } } // namespace cproto diff --git a/cpp_src/net/cproto/serverconnection.h b/cpp_src/net/cproto/serverconnection.h index c2d155e6b..be2f661d3 100644 --- a/cpp_src/net/cproto/serverconnection.h +++ b/cpp_src/net/cproto/serverconnection.h @@ -14,20 +14,22 @@ namespace cproto { using reindexer::h_vector; -class ServerConnection : public ConnectionST, public IServerConnection, public Writer { +class ServerConnection final : public ConnectionST, public IServerConnection, public Writer { public: - ServerConnection(int fd, ev::dynamic_loop &loop, Dispatcher &dispatcher, bool enableStat, size_t maxUpdatesSize, + using BaseConnT = ConnectionST; + + ServerConnection(socket &&s, ev::dynamic_loop &loop, Dispatcher &dispatcher, bool enableStat, size_t maxUpdatesSize, bool enableCustomBalancing); - ~ServerConnection(); + ~ServerConnection() override; // IServerConnection interface implementation static ConnectionFactory NewFactory(Dispatcher &dispatcher, bool enableStat, size_t maxUpdatesSize) { - return [&dispatcher, enableStat, maxUpdatesSize](ev::dynamic_loop &loop, int fd, bool allowCustomBalancing) { - return new ServerConnection(fd, loop, dispatcher, enableStat, maxUpdatesSize, allowCustomBalancing); + return [&dispatcher, enableStat, maxUpdatesSize](ev::dynamic_loop &loop, socket &&s, bool allowCustomBalancing) { + return new ServerConnection(std::move(s), loop, dispatcher, enableStat, maxUpdatesSize, allowCustomBalancing); }; } - bool IsFinished() const noexcept override final { return !sock_.valid(); } + bool IsFinished() const noexcept override final { return !BaseConnT::sock_.valid(); } BalancingType GetBalancingType() const noexcept override final { return balancingType_; } void SetRebalanceCallback(std::function cb) override final { assertrx(!rebalance_); @@ -39,9 +41,9 @@ class ServerConnection : public ConnectionST, public IServerConnection, public W hasPendingData_ = false; onRead(); } - callback(io_, ev::READ); + BaseConnT::callback(BaseConnT::io_, ev::READ); } - bool Restart(int fd) override final; + bool Restart(socket &&s) override final; void Detach() override final; void Attach(ev::dynamic_loop &loop) override final; @@ -51,18 +53,18 @@ class ServerConnection : public ConnectionST, public IServerConnection, public W void SetClientData(std::unique_ptr &&data) noexcept override final { clientData_ = std::move(data); } ClientData *GetClientData() noexcept override final { return clientData_.get(); } std::shared_ptr GetConnectionStat() noexcept override final { - return ConnectionST::stats_ ? ConnectionST::stats_->get_stat() : std::shared_ptr(); + return BaseConnT::stats_ ? 
BaseConnT::stats_->get_stat() : std::shared_ptr(); } protected: - ReadResT onRead() override; + typename BaseConnT::ReadResT onRead() override; void onClose() override; void handleRPC(Context &ctx); void responceRPC(Context &ctx, const Error &error, const Args &args); void async_cb(ev::async &) { sendUpdates(); } void timeout_cb(ev::periodic &, int) { sendUpdates(); } void sendUpdates(); - void handleException(Context &ctx, const Error &err); + void handleException(Context &ctx, const Error &err) noexcept; Dispatcher &dispatcher_; std::unique_ptr clientData_; @@ -82,6 +84,7 @@ class ServerConnection : public ConnectionST, public IServerConnection, public W BalancingType balancingType_ = BalancingType::NotSet; std::function rebalance_; }; + } // namespace cproto } // namespace net } // namespace reindexer diff --git a/cpp_src/net/ev/ev.cc b/cpp_src/net/ev/ev.cc index 8e3d0f790..84ad0b518 100644 --- a/cpp_src/net/ev/ev.cc +++ b/cpp_src/net/ev/ev.cc @@ -35,7 +35,7 @@ void loop_posix_base::enable_asyncs() { if (async_fd_ < 0) { async_fd_ = eventfd(0, EFD_NONBLOCK); if (async_fd_ < 0) { - perror("eventfd:"); + perror("eventfd error"); } owner_->set(async_fd_, nullptr, READ); } @@ -70,7 +70,7 @@ loop_posix_base::~loop_posix_base() { void loop_posix_base::enable_asyncs() { if (async_fds_[0] < 0) { if (pipe(async_fds_) < 0) { - perror("pipe:"); + perror("pipe error"); } owner_->set(async_fds_[0], nullptr, READ); } @@ -102,16 +102,16 @@ class loop_select_backend_private { }; loop_select_backend::loop_select_backend() : private_(new loop_select_backend_private) {} -loop_select_backend::~loop_select_backend() {} +loop_select_backend::~loop_select_backend() = default; -void loop_select_backend::init(dynamic_loop *owner) { +void loop_select_backend::init(dynamic_loop *owner) noexcept { owner_ = owner; private_->maxfd_ = -1; FD_ZERO(&private_->rfds_); FD_ZERO(&private_->wfds_); } -void loop_select_backend::set(int fd, int events, int /*oldevents*/) { +void loop_select_backend::set(int fd, int events, int /*oldevents*/) noexcept { assertrx(fd < capacity()); if (fd > private_->maxfd_) private_->maxfd_ = fd; @@ -129,7 +129,7 @@ void loop_select_backend::set(int fd, int events, int /*oldevents*/) { } } -void loop_select_backend::stop(int fd) { +void loop_select_backend::stop(int fd) noexcept { FD_CLR(fd, &private_->rfds_); FD_CLR(fd, &private_->wfds_); @@ -157,7 +157,7 @@ int loop_select_backend::runonce(int64_t t) { return ret; } -int loop_select_backend::capacity() { return FD_SETSIZE; } +int loop_select_backend::capacity() noexcept { return FD_SETSIZE; } #endif @@ -249,7 +249,7 @@ void loop_epoll_backend::init(dynamic_loop *owner) { owner_ = owner; private_->ctlfd_ = epoll_create1(EPOLL_CLOEXEC); if (private_->ctlfd_ < 0) { - perror("epoll_create"); + perror("epoll_create error"); } private_->events_.reserve(2048); private_->events_.resize(1); @@ -261,7 +261,7 @@ void loop_epoll_backend::set(int fd, int events, int oldevents) { ev.events = ((events & READ) ? int(EPOLLIN) | int(EPOLLHUP) : 0) | ((events & WRITE) ? int(EPOLLOUT) : 0) /*| EPOLLET*/; ev.data.fd = fd; if (epoll_ctl(private_->ctlfd_, oldevents == 0 ? 
EPOLL_CTL_ADD : EPOLL_CTL_MOD, fd, &ev) < 0) { - perror("epoll_ctl EPOLL_CTL_MOD"); + perror("epoll_ctl EPOLL_CTL_MOD error"); } if (oldevents == 0) { private_->events_.emplace_back(); @@ -272,7 +272,7 @@ void loop_epoll_backend::stop(int fd) { epoll_event ev; ev.data.fd = fd; if (epoll_ctl(private_->ctlfd_, EPOLL_CTL_DEL, fd, &ev) < 0) { - perror("epoll_ctl EPOLL_CTL_DEL"); + perror("epoll_ctl EPOLL_CTL_DEL error"); } private_->events_.pop_back(); } @@ -369,7 +369,7 @@ int loop_wsa_backend::runonce(int64_t t) { int ret = WaitForMultipleObjects(ecount, objs, FALSE, t != -1 ? t / 1000 : INFINITE); if (ret < 0) { - perror("WaitForMultipleObjects"); + perror("WaitForMultipleObjects error"); return ret; } @@ -407,7 +407,7 @@ int loop_wsa_backend::capacity() { return WSA_MAXIMUM_WAIT_EVENTS - 2; } static std::atomic signalsMask; -extern "C" void net_ev_sighandler(int signum) { +static void net_ev_sighandler(int signum) { signalsMask |= (1 << signum); #ifdef _WIN32 SetEvent(gSigEvent); @@ -544,7 +544,7 @@ void dynamic_loop::stop(timer *watcher) { void dynamic_loop::set(sig *watcher) { auto it = std::find(sigs_.begin(), sigs_.end(), watcher); if (it != sigs_.end()) { - printf("sig %d already set\n", watcher->signum_); + fprintf(stderr, "sig %d already set\n", watcher->signum_); return; } sigs_.push_back(watcher); @@ -556,7 +556,7 @@ void dynamic_loop::set(sig *watcher) { auto res = sigaction(watcher->signum_, &new_action, &old_action); if (res < 0) { - printf("sigaction error: %d\n", res); + fprintf(stderr, "sigaction error: %d\n", res); return; } watcher->old_action_ = old_action; @@ -568,14 +568,14 @@ void dynamic_loop::set(sig *watcher) { void dynamic_loop::stop(sig *watcher) { auto it = std::find(sigs_.begin(), sigs_.end(), watcher); if (it == sigs_.end()) { - printf("sig %d is not set\n", watcher->signum_); + fprintf(stderr, "sig %d is not set\n", watcher->signum_); return; } sigs_.erase(it); #ifndef _WIN32 auto res = sigaction(watcher->signum_, &(watcher->old_action_), 0); if (res < 0) { - printf("sigaction error: %d\n", res); + fprintf(stderr, "sigaction error: %d\n", res); return; } #else diff --git a/cpp_src/net/ev/ev.h b/cpp_src/net/ev/ev.h index d0e9608a8..6c7aed5f1 100644 --- a/cpp_src/net/ev/ev.h +++ b/cpp_src/net/ev/ev.h @@ -90,11 +90,11 @@ class loop_select_backend : public loop_posix_base { public: loop_select_backend(); ~loop_select_backend(); - void init(dynamic_loop *owner); - void set(int fd, int events, int oldevents); - void stop(int fd); + void init(dynamic_loop *owner) noexcept; + void set(int fd, int events, int oldevents) noexcept; + void stop(int fd) noexcept; int runonce(int64_t tv); - static int capacity(); + static int capacity() noexcept; protected: std::unique_ptr private_; @@ -310,7 +310,6 @@ class timer { void set(dynamic_loop &loop_) noexcept { loop.loop_ = &loop_; } void start(double t, double p = 0) { period_ = p; - t_ = t; loop.set(this, t); } void stop() { loop.stop(this); } @@ -323,7 +322,6 @@ class timer { void set(std::function func) noexcept { func_ = std::move(func); } bool is_active() const noexcept { return loop.is_active(this); } - double last_delay() const noexcept { return t_; } bool has_period() const noexcept { return period_ > 0.00000001; } loop_ref loop; @@ -348,7 +346,6 @@ class timer { std::function func_ = nullptr; double period_ = 0; - double t_ = 0; bool in_coro_storage_ = false; }; diff --git a/cpp_src/net/http/router.cc b/cpp_src/net/http/router.cc index ef56ddff2..2797d979c 100644 --- a/cpp_src/net/http/router.cc +++ 
b/cpp_src/net/http/router.cc @@ -50,9 +50,12 @@ HttpStatusCode HttpStatus::errCodeToHttpStatus(int errCode) { case errParams: case errParseSQL: case errParseDSL: + case errQueryExec: return StatusBadRequest; case errForbidden: return StatusForbidden; + case errTimeout: + return StatusRequestTimeout; default: return StatusInternalServerError; } diff --git a/cpp_src/net/http/router.h b/cpp_src/net/http/router.h index 6fc5fca6b..8f1fe2b97 100644 --- a/cpp_src/net/http/router.h +++ b/cpp_src/net/http/router.h @@ -63,7 +63,7 @@ enum HttpMethod : int { typedef std::string_view UrlParam; struct HttpStatus { - HttpStatus() { code = StatusOK; } + HttpStatus() noexcept : code(StatusOK) {} HttpStatus(HttpStatusCode httpcode, std::string httpwhat) : code(httpcode), what(std::move(httpwhat)) {} explicit HttpStatus(const Error &err) : what(err.what()) { code = errCodeToHttpStatus(err.code()); } diff --git a/cpp_src/net/http/serverconnection.cc b/cpp_src/net/http/serverconnection.cc index 3ce8ea537..0376881ca 100644 --- a/cpp_src/net/http/serverconnection.cc +++ b/cpp_src/net/http/serverconnection.cc @@ -19,15 +19,15 @@ using namespace std::string_view_literals; static const std::string_view kStrEOL = "\r\n"sv; extern std::unordered_map kHTTPCodes; -ServerConnection::ServerConnection(int fd, ev::dynamic_loop &loop, Router &router, size_t maxRequestSize) - : ConnectionST(fd, loop, false, maxRequestSize < kConnReadbufSize ? maxRequestSize : kConnReadbufSize), +ServerConnection::ServerConnection(socket &&s, ev::dynamic_loop &loop, Router &router, size_t maxRequestSize) + : ConnectionST(std::move(s), loop, false, maxRequestSize < kConnReadbufSize ? maxRequestSize : kConnReadbufSize), router_(router), maxRequestSize_(maxRequestSize) { callback(io_, ev::READ); } -bool ServerConnection::Restart(int fd) { - restart(fd); +bool ServerConnection::Restart(socket &&s) { + restart(std::move(s)); bodyLeft_ = 0; formData_ = false; enableHttp11_ = false; diff --git a/cpp_src/net/http/serverconnection.h b/cpp_src/net/http/serverconnection.h index 2b1748c8f..261dca784 100644 --- a/cpp_src/net/http/serverconnection.h +++ b/cpp_src/net/http/serverconnection.h @@ -12,14 +12,14 @@ namespace net { namespace http { const ssize_t kHttpMaxHeaders = 128; -class ServerConnection : public IServerConnection, public ConnectionST { +class ServerConnection final : public IServerConnection, public ConnectionST { public: - ServerConnection(int fd, ev::dynamic_loop &loop, Router &router, size_t maxRequestSize); + ServerConnection(socket &&s, ev::dynamic_loop &loop, Router &router, size_t maxRequestSize); static ConnectionFactory NewFactory(Router &router, size_t maxRequestSize) { - return [&router, maxRequestSize](ev::dynamic_loop &loop, int fd, bool allowCustomBalancing) { + return [&router, maxRequestSize](ev::dynamic_loop &loop, socket &&s, bool allowCustomBalancing) { (void)allowCustomBalancing; - return new ServerConnection(fd, loop, router, maxRequestSize); + return new ServerConnection(std::move(s), loop, router, maxRequestSize); }; } @@ -28,7 +28,7 @@ class ServerConnection : public IServerConnection, public ConnectionST { void SetRebalanceCallback(std::function cb) override final { (void)cb; } bool HasPendingData() const noexcept override final { return false; } void HandlePendingData() override final {} - bool Restart(int fd) override final; + bool Restart(socket &&s) override final; void Detach() override final; void Attach(ev::dynamic_loop &loop) override final; diff --git a/cpp_src/net/iserverconnection.h 
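// --- Illustrative sketch (not part of the patch): the router.cc hunk above maps two more engine
// error codes onto HTTP statuses (query-execution failures -> 400, timeouts -> 408) instead of the
// generic 500. A self-contained mirror of that mapping with hypothetical enum definitions:
enum ErrCode { errOK = 0, errParams, errParseSQL, errParseDSL, errQueryExec, errForbidden, errTimeout, errLogic };
enum HttpCode { StatusOK = 200, StatusBadRequest = 400, StatusForbidden = 403, StatusRequestTimeout = 408, StatusInternalServerError = 500 };

static HttpCode toHttpStatus(ErrCode code) {
	switch (code) {
		case errOK:
			return StatusOK;
		case errParams:
		case errParseSQL:
		case errParseDSL:
		case errQueryExec:  // added above: a failed query is reported as a client error
			return StatusBadRequest;
		case errForbidden:
			return StatusForbidden;
		case errTimeout:  // added above: report 408 Request Timeout
			return StatusRequestTimeout;
		default:
			return StatusInternalServerError;
	}
}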
b/cpp_src/net/iserverconnection.h index 8d06be8cd..d51d3b3aa 100644 --- a/cpp_src/net/iserverconnection.h +++ b/cpp_src/net/iserverconnection.h @@ -6,6 +6,8 @@ namespace reindexer { namespace net { +class socket; + /// Server side network connection interface for Listener. class IServerConnection { public: @@ -29,9 +31,9 @@ class IServerConnection { virtual void HandlePendingData() = 0; /// Restart connection - /// @param fd - file descriptor of accepted connection. + /// @param s - socket of the accepted connection. /// @return true - if successfuly restarted, false - if connection can't be restarted. - virtual bool Restart(int fd) = 0; + virtual bool Restart(socket &&s) = 0; /// Attach connection to another listener loop. Must be called from thread of loop /// @param loop - another loop to bind virtual void Attach(ev::dynamic_loop &loop) = 0; @@ -41,9 +43,9 @@ class IServerConnection { /// Functor factory type for creating new connection. Listener will call this factory after accept of client connection. /// @param loop - Current loop of Listener's thread. -/// @param fd file - Descriptor of accepted connection. +/// @param s - Socket of the accepted connection. /// @param allowCustomBalancing - true, if caller supports custom balancing hints -typedef std::function ConnectionFactory; +typedef std::function ConnectionFactory; } // namespace net } // namespace reindexer diff --git a/cpp_src/net/listener.cc b/cpp_src/net/listener.cc index c5cfcc232..1f7790aa0 100644 --- a/cpp_src/net/listener.cc +++ b/cpp_src/net/listener.cc @@ -42,14 +42,14 @@ Listener::~Listener() { } template -bool Listener::Bind(std::string addr) { +bool Listener::Bind(std::string addr, socket_domain type) { if (shared_->sock_.valid()) { return false; } shared_->addr_ = std::move(addr); - if (shared_->sock_.bind(shared_->addr_) < 0) { + if (shared_->sock_.bind(shared_->addr_, type) < 0) { return false; } @@ -90,7 +90,6 @@ void Listener::io_accept(ev::io & /*watcher*/, int revents) { } if (shared_->terminating_) { - client.close(); logPrintf(LogWarning, "Can't accept connection. 
Listener is terminating!"); return; } @@ -103,10 +102,10 @@ void Listener::io_accept(ev::io & /*watcher*/, int revents) { shared_->idle_.pop_back(); lck.unlock(); conn->Attach(loop_); - conn->Restart(client.fd()); + conn->Restart(std::move(client)); } else { lck.unlock(); - conn = std::unique_ptr(shared_->connFactory_(loop_, client.fd(), LT == ListenerType::Mixed)); + conn = std::unique_ptr(shared_->connFactory_(loop_, std::move(client), LT == ListenerType::Mixed)); connIsActive = !conn->IsFinished(); } @@ -422,12 +421,11 @@ Listener::Shared::Shared(ConnectionFactory &&connFactory, int maxListeners) template Listener::Shared::~Shared() { - sock_.close(); + if rx_unlikely (sock_.close() != 0) { + perror("sock_.close() error"); + } } -template class Listener; -template class Listener; - ForkedListener::ForkedListener(ev::dynamic_loop &loop, ConnectionFactory &&connFactory) : connFactory_(std::move(connFactory)), loop_(loop) { io_.set(this); @@ -442,17 +440,19 @@ ForkedListener::~ForkedListener() { if (!terminating_ || runningThreadsCount_) { ForkedListener::Stop(); } - sock_.close(); + if rx_unlikely (sock_.close() != 0) { + perror("sock_.close() error"); + } } -bool ForkedListener::Bind(std::string addr) { +bool ForkedListener::Bind(std::string addr, socket_domain type) { if (sock_.valid()) { return false; } addr_ = std::move(addr); - if (sock_.bind(addr_) < 0) { + if (sock_.bind(addr_, type) < 0) { return false; } @@ -481,13 +481,12 @@ void ForkedListener::io_accept(ev::io & /*watcher*/, int revents) { } if (terminating_) { - client.close(); logPrintf(LogWarning, "Can't accept connection. Listener is terminating!"); return; } ++runningThreadsCount_; - std::thread th([this, client]() noexcept { + std::thread th([this, client = std::move(client)]() mutable noexcept { try { #if REINDEX_WITH_GPERFTOOLS if (alloc_ext::TCMallocIsAvailable()) { @@ -500,7 +499,7 @@ void ForkedListener::io_accept(ev::io & /*watcher*/, int revents) { async.set(loop); async.start(); - Worker w(std::unique_ptr(connFactory_(loop, client.fd(), false)), async); + Worker w(std::unique_ptr(connFactory_(loop, std::move(client), false)), async); auto pc = w.conn.get(); if (pc->IsFinished()) { // Connection may be closed inside Worker construction pc->Detach(); @@ -553,5 +552,8 @@ void ForkedListener::Stop() { } } +template class Listener; +template class Listener; + } // namespace net } // namespace reindexer diff --git a/cpp_src/net/listener.h b/cpp_src/net/listener.h index fb100953f..99805955b 100644 --- a/cpp_src/net/listener.h +++ b/cpp_src/net/listener.h @@ -18,9 +18,10 @@ class IListener { public: virtual ~IListener() = default; /// Bind listener to specified host:port - /// @param addr - tcp host:port for bind + /// @param addr - tcp host:port for bind or file path for the unix domain socket + /// @param type - socket's type: tcp or unix /// @return true - if bind successful, false - on bind error - virtual bool Bind(std::string addr) = 0; + virtual bool Bind(std::string addr, socket_domain type) = 0; /// Stop synchroniusly stops listener virtual void Stop() = 0; }; @@ -38,14 +39,8 @@ struct ConnPtrEqual { struct ConnPtrHash { using transparent_key_equal = ConnPtrEqual; - bool operator()(const IServerConnection *ptr) const noexcept { - std::hash h; - return h(uintptr_t(ptr)); - } - bool operator()(const std::unique_ptr &ptr) const noexcept { - std::hash h; - return h(uintptr_t(ptr.get())); - } + size_t operator()(const IServerConnection *ptr) const noexcept { return std::hash()(uintptr_t(ptr)); } + size_t 
operator()(const std::unique_ptr &ptr) const noexcept { return std::hash()(uintptr_t(ptr.get())); } }; enum class ListenerType { @@ -65,20 +60,21 @@ enum class ListenerType { /// Network listener implementation template -class Listener : public IListener { +class Listener final : public IListener { public: /// Constructs new listner object. /// @param loop - ev::loop of caller's thread, listener's socket will be binded to that loop. /// @param connFactory - Connection factory, will create objects with IServerConnection interface implementation. /// @param maxListeners - Maximum number of threads, which listener will utilize. std::thread::hardware_concurrency() by default Listener(ev::dynamic_loop &loop, ConnectionFactory &&connFactory, int maxListeners = 0); - ~Listener(); + ~Listener() override; /// Bind listener to specified host:port - /// @param addr - tcp host:port for bind + /// @param addr - tcp host:port for bind or file path for the unix domain socket + /// @param type - socket's type: tcp or unix /// @return true - if bind successful, false - on bind error - bool Bind(std::string addr); + bool Bind(std::string addr, socket_domain type) override; /// Stop synchroniusly stops listener - void Stop(); + void Stop() override; protected: void reserve_stack(); @@ -111,7 +107,7 @@ class Listener : public IListener { Shared(ConnectionFactory &&connFactory, int maxListeners); ~Shared(); - socket sock_; + lst_socket sock_; const int maxListeners_; std::atomic listenersCount_ = {0}; std::atomic connCount_ = {0}; @@ -159,19 +155,20 @@ class Listener : public IListener { }; /// Network listener implementation -class ForkedListener : public IListener { +class ForkedListener final : public IListener { public: /// Constructs new listner object. /// @param loop - ev::loop of caller's thread, listener's socket will be binded to that loop. /// @param connFactory - Connection factory, will create objects with IServerConnection interface implementation. ForkedListener(ev::dynamic_loop &loop, ConnectionFactory &&connFactory); - ~ForkedListener(); + ~ForkedListener() override; /// Bind listener to specified host:port - /// @param addr - tcp host:port for bind + /// @param addr - tcp host:port for bind or file path for the unix domain socket + /// @param type - socket's type: tcp or unix /// @return true - if bind successful, false - on bind error - bool Bind(std::string addr); + bool Bind(std::string addr, socket_domain type) override; /// Stop synchroniusly stops listener - void Stop(); + void Stop() override; protected: void io_accept(ev::io &watcher, int revents); @@ -192,7 +189,7 @@ class ForkedListener : public IListener { ev::async *async; }; - socket sock_; + lst_socket sock_; std::mutex mtx_; ConnectionFactory connFactory_; std::atomic terminating_{false}; diff --git a/cpp_src/net/manualconnection.cc b/cpp_src/net/manualconnection.cc index c2a18f0d6..bf0cc3fb1 100644 --- a/cpp_src/net/manualconnection.cc +++ b/cpp_src/net/manualconnection.cc @@ -4,15 +4,8 @@ namespace reindexer { namespace net { -manual_connection::manual_connection(int fd, size_t rd_buf_size, bool enable_stat) - : sock_(fd), buffered_data_(rd_buf_size), stats_(enable_stat ? new connection_stats_collector : nullptr) {} - -manual_connection::~manual_connection() { - if (sock_.valid()) { - io_.stop(); - sock_.close(); - } -} +manual_connection::manual_connection(size_t rd_buf_size, bool enable_stat) + : buffered_data_(rd_buf_size), stats_(enable_stat ? 
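// --- Illustrative sketch (not part of the patch): with the Bind() signature change above, the same
// listener code can serve both a TCP host:port and a unix-domain socket path, selected by the new
// socket_domain argument. Rough usage, assuming the declarations from net/listener.h and an already
// constructed listener (its ConnectionFactory and ListenerType template argument are elided here):
#include <cstdio>
#include <string>

static bool bindEndpoint(reindexer::net::IListener &lst, const std::string &addr, bool isUnixSocket) {
	using reindexer::net::socket_domain;
	// For socket_domain::unx the address is a filesystem path; for socket_domain::tcp it is host:port.
	const socket_domain domain = isUnixSocket ? socket_domain::unx : socket_domain::tcp;
	if (!lst.Bind(addr, domain)) {
		std::fprintf(stderr, "Can't listen on '%s'\n", addr.c_str());
		return false;
	}
	return true;
}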
new connection_stats_collector : nullptr) {} void manual_connection::attach(ev::dynamic_loop &loop) noexcept { assertrx(!attached_); @@ -40,7 +33,9 @@ void manual_connection::close_conn(int err) { connect_timer_.stop(); if (sock_.valid()) { io_.stop(); - sock_.close(); + if rx_unlikely (sock_.close() != 0) { + perror("sock_.close() error"); + } } cur_events_ = 0; const bool hadRData = !r_data_.empty(); @@ -59,20 +54,20 @@ void manual_connection::close_conn(int err) { if (stats_) stats_->stop(); } -void manual_connection::restart(int fd) { +void manual_connection::restart(socket &&s) { assertrx(!sock_.valid()); - sock_ = fd; + sock_ = std::move(s); if (stats_) stats_->restart(); } -int manual_connection::async_connect(std::string_view addr) noexcept { +int manual_connection::async_connect(std::string_view addr, socket_domain type) noexcept { connect_timer_.stop(); if (state_ == conn_state::connected || state_ == conn_state::connecting) { close_conn(k_sock_closed_err); } assertrx(w_data_.empty()); ++conn_id_; - int ret = sock_.connect(addr); + int ret = sock_.connect(addr, type); if (ret == 0) { state_ = conn_state::connected; return 0; diff --git a/cpp_src/net/manualconnection.h b/cpp_src/net/manualconnection.h index f0bd5f61c..6175cfb51 100644 --- a/cpp_src/net/manualconnection.h +++ b/cpp_src/net/manualconnection.h @@ -24,14 +24,14 @@ class manual_connection { enum class conn_state { init, connecting, connected }; - manual_connection(int fd, size_t rd_buf_size, bool enable_stat); - virtual ~manual_connection(); + manual_connection(size_t rd_buf_size, bool enable_stat); + virtual ~manual_connection() = default; void set_connect_timeout(std::chrono::milliseconds timeout) noexcept { connect_timeout_ = timeout; } void close_conn(int err); void attach(ev::dynamic_loop &loop) noexcept; void detach() noexcept; - void restart(int fd); + void restart(socket &&s); template void async_read(buf_t &data, size_t cnt, async_cb_t cb) { @@ -59,7 +59,7 @@ class manual_connection { }; return async_write_impl(data, std::move(l), send_now); } - int async_connect(std::string_view addr) noexcept; + int async_connect(std::string_view addr, socket_domain type) noexcept; conn_state state() const noexcept { return state_; } int socket_last_error() const noexcept { return sock_.last_error(); } diff --git a/cpp_src/net/socket.cc b/cpp_src/net/socket.cc index a0a1bcf3d..25eabb803 100644 --- a/cpp_src/net/socket.cc +++ b/cpp_src/net/socket.cc @@ -1,8 +1,7 @@ #include "socket.h" -#include -#include -#include -#include +#include +#include +#include #include #include #include "estl/h_vector.h" @@ -11,59 +10,62 @@ namespace reindexer { namespace net { -int socket::bind(std::string_view addr) { - struct addrinfo *results = nullptr; - int ret = create(addr, &results); - if (!ret) { - assertrx(results != nullptr); - if (::bind(fd_, results->ai_addr, results->ai_addrlen) != 0) { // -V595 - perror("bind error"); - close(); - } - } - if (results) { - freeaddrinfo(results); - } - return ret; +#ifdef _WIN32 +static int print_not_supported() { + fprintf(stderr, "Unix domain socket are not supported on windows\n"); + return -1; } +#endif // _WIN32 -int socket::connect(std::string_view addr) noexcept { - struct addrinfo *results = nullptr; - int ret = create(addr, &results); - if (!ret) { - assertrx(results != nullptr); - if (::connect(fd_, results->ai_addr, results->ai_addrlen) != 0) { // -V595 - if (!would_block(last_error())) { +int socket::connect(std::string_view addr, socket_domain t) { + int ret = 0; + type_ = t; + if 
(domain() == socket_domain::tcp) { + struct addrinfo *results = nullptr; + ret = create(addr, &results); + if rx_likely (!ret) { + assertrx(results != nullptr); + if rx_unlikely (::connect(fd_, results->ai_addr, results->ai_addrlen) != 0) { // -V595 + if rx_unlikely (!would_block(last_error())) { + perror("connect error"); + close(); + } + ret = -1; + } + } + if rx_likely (results) { + freeaddrinfo(results); + } + } else { +#ifdef _WIN32 + return print_not_supported(); +#else // _WIN32 + if rx_unlikely (create(addr, nullptr) < 0) { + return -1; + } + + struct sockaddr_un address; + address.sun_family = AF_UNIX; + memcpy(address.sun_path, addr.data(), addr.size()); + address.sun_path[addr.size()] = 0; + + if rx_unlikely (::connect(fd_, reinterpret_cast(&address), sizeof(address)) != 0) { // -V595 + if rx_unlikely (!would_block(last_error())) { perror("connect error"); close(); } - ret = -1; + return -1; } - } - if (results) { - freeaddrinfo(results); +#endif // _WIN32 } return ret; } -int socket::listen(int backlog) { -#ifdef __linux__ - int enable = 1; - - if (setsockopt(fd_, SOL_TCP, TCP_DEFER_ACCEPT, &enable, sizeof(enable)) < 0) { - perror("setsockopt(TCP_DEFER_ACCEPT) failed"); - } - if (setsockopt(fd_, SOL_TCP, TCP_QUICKACK, &enable, sizeof(enable)) < 0) { - perror("setsockopt(TCP_QUICKACK) failed"); - } -#endif - return ::listen(fd_, backlog); -} - ssize_t socket::recv(span buf) { // return ::recv(fd_, buf.data(), buf.size(), 0); } + ssize_t socket::send(span buf) { // return ::send(fd_, buf.data(), buf.size(), 0); @@ -83,7 +85,7 @@ ssize_t socket::send(span chunks) { return res == 0 ? numberOfBytesSent : -1; } -#else +#else // _WIN32 ssize_t socket::send(span chunks) { h_vector iov; iov.resize(chunks.size()); @@ -94,116 +96,113 @@ ssize_t socket::send(span chunks) { } return ::writev(fd_, iov.data(), iov.size()); } -#endif - -int socket::close() { - int fd = fd_; - fd_ = -1; -#ifndef _WIN32 - return ::close(fd); -#else - return ::closesocket(fd); -#endif -} +#endif // _WIN32 int socket::create(std::string_view addr, struct addrinfo **presults) { assertrx(!valid()); - struct addrinfo hints, *results = nullptr; - memset(&hints, 0, sizeof(hints)); - hints.ai_flags = AI_PASSIVE; - hints.ai_family = AF_UNSPEC; - hints.ai_socktype = SOCK_STREAM; - hints.ai_protocol = IPPROTO_TCP; - *presults = nullptr; - - std::string saddr(addr); - char *paddr = &saddr[0]; - - char *pport = strchr(paddr, ':'); - if (pport == nullptr) { - pport = paddr; - paddr = nullptr; + if (domain() == socket_domain::tcp) { + struct addrinfo hints, *results = nullptr; + memset(&hints, 0, sizeof(hints)); + hints.ai_flags = AI_PASSIVE; + hints.ai_family = AF_UNSPEC; + hints.ai_socktype = SOCK_STREAM; + hints.ai_protocol = IPPROTO_TCP; + *presults = nullptr; + + std::string saddr(addr); + char *paddr = &saddr[0]; + + char *pport = strchr(paddr, ':'); + if (pport == nullptr) { + pport = paddr; + paddr = nullptr; + } else { + *pport = 0; + if (*paddr == 0) paddr = nullptr; + pport++; + } + + int ret = ::getaddrinfo(paddr, pport, &hints, &results); + if rx_unlikely (ret != 0) { + fprintf(stderr, "getaddrinfo failed: %s\n", gai_strerror(ret)); + return -1; + } + assertrx(results != nullptr); + *presults = results; + + if rx_unlikely ((fd_ = ::socket(results->ai_family, results->ai_socktype, results->ai_protocol)) < 0) { + perror("socket error"); + return -1; + } + + int enable = 1; + if rx_unlikely (::setsockopt(fd_, SOL_SOCKET, SO_REUSEADDR, reinterpret_cast(&enable), sizeof(enable)) < 0) { + 
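// --- Illustrative sketch (not part of the patch): the unix-domain branch above fills a sockaddr_un
// with the socket path and calls ::connect() on an AF_UNIX/SOCK_STREAM descriptor. A minimal
// standalone equivalent (POSIX only; non-blocking mode and EINPROGRESS handling are omitted):
#include <sys/socket.h>
#include <sys/un.h>
#include <unistd.h>
#include <cstring>
#include <string_view>

static int connectUnixSocket(std::string_view path) {
	int fd = ::socket(AF_UNIX, SOCK_STREAM, 0);
	if (fd < 0) return -1;

	sockaddr_un address{};
	address.sun_family = AF_UNIX;
	if (path.size() >= sizeof(address.sun_path)) {  // the path must fit into sun_path (incl. terminator)
		::close(fd);
		return -1;
	}
	std::memcpy(address.sun_path, path.data(), path.size());

	if (::connect(fd, reinterpret_cast<const sockaddr *>(&address), sizeof(address)) != 0) {
		::close(fd);
		return -1;
	}
	return fd;
}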
perror("setsockopt(SO_REUSEADDR) failed"); + } } else { - *pport = 0; - if (*paddr == 0) paddr = nullptr; - pport++; - } +#ifdef _WIN32 + return print_not_supported(); +#else // _WIN32 + (void)addr; + (void)presults; + assertrx(!presults); - int ret = ::getaddrinfo(paddr, pport, &hints, &results); - if (ret != 0) { - fprintf(stderr, "getaddrinfo failed: %s\n", gai_strerror(ret)); - return -1; + if rx_unlikely ((fd_ = ::socket(AF_UNIX, SOCK_STREAM, 0)) < 0) { + perror("socket error"); + return -1; + } +#endif // _WIN32 } - assertrx(results != nullptr); - *presults = results; - if ((fd_ = ::socket(results->ai_family, results->ai_socktype, results->ai_protocol)) < 0) { - perror("socket error"); - return -1; + if rx_unlikely (set_nodelay() < 0) { + perror("set_nodelay() failed"); } - set_nonblock(); - - int enable = 1; - if (::setsockopt(fd_, SOL_SOCKET, SO_REUSEADDR, reinterpret_cast(&enable), sizeof(enable)) < 0) { - perror("setsockopt(SO_REUSEADDR) failed"); + if rx_unlikely (set_nonblock() < 0) { + perror("set_nonblock() failed"); } - set_nodelay(); - return 0; } std::string socket::addr() const { - struct sockaddr_storage saddr; - struct sockaddr *paddr = reinterpret_cast(&saddr); - socklen_t len = sizeof(saddr); - if (::getpeername(fd_, paddr, &len) == 0) { - char buf[INET_ADDRSTRLEN] = {}; - auto port = ntohs(reinterpret_cast(paddr)->sin_port); - if (getnameinfo(paddr, len, buf, INET_ADDRSTRLEN, NULL, 0, NI_NUMERICHOST) == 0) { - return std::string(buf) + ":" + std::to_string(port); + if (domain() == socket_domain::tcp) { + struct sockaddr_storage saddr; + struct sockaddr *paddr = reinterpret_cast(&saddr); + socklen_t len = sizeof(saddr); + if rx_likely (::getpeername(fd_, paddr, &len) == 0) { + char buf[INET_ADDRSTRLEN] = {}; + auto port = ntohs(reinterpret_cast(paddr)->sin_port); + if rx_likely (getnameinfo(paddr, len, buf, INET_ADDRSTRLEN, NULL, 0, NI_NUMERICHOST) == 0) { + return std::string(buf) + ':' + std::to_string(port); + } else { + perror("getnameinfo error"); + } } else { - perror("getnameinfo"); + perror("getpeername error"); } + return std::string(); } else { - perror("getpeername"); + return std::string("unx_dmn"); } - return std::string(); -} - -socket socket::accept() { - struct sockaddr client_addr; - memset(&client_addr, 0, sizeof(client_addr)); - socklen_t client_len = sizeof(client_addr); - -#ifdef __linux__ - socket client = ::accept4(fd_, &client_addr, &client_len, SOCK_NONBLOCK); - -#else - socket client = ::accept(fd_, &client_addr, &client_len); - if (client.valid()) { - client.set_nonblock(); - } -#endif - if (client.valid()) { - client.set_nodelay(); - } - return client; } int socket::set_nonblock() { #ifndef _WIN32 return fcntl(fd_, F_SETFL, fcntl(fd_, F_GETFL, 0) | O_NONBLOCK); -#else +#else // _WIN32 u_long flag = 1; return ioctlsocket(fd_, FIONBIO, &flag); -#endif +#endif // _WIN32 } -int socket::set_nodelay() { - int flag = 1; - return setsockopt(fd_, SOL_TCP, TCP_NODELAY, reinterpret_cast(&flag), sizeof(flag)); +int socket::set_nodelay() noexcept { + if (domain() == socket_domain::tcp) { + int flag = 1; + return setsockopt(fd_, SOL_TCP, TCP_NODELAY, reinterpret_cast(&flag), sizeof(flag)); + } else { + return 0; + } } bool socket::has_pending_data() const noexcept { @@ -213,27 +212,28 @@ bool socket::has_pending_data() const noexcept { #ifndef _WIN32 int count; if (ioctl(fd(), FIONREAD, &count) < 0) { - perror("ioctl(FIONREAD)"); + perror("ioctl(FIONREAD) error"); return false; } -#else +#else // _WIN32 u_long count = -1; if (ioctlsocket(fd(), FIONREAD, 
&count) < 0) { - perror("ioctlsocket(FIONREAD)"); + perror("ioctlsocket(FIONREAD) error"); return false; } -#endif +#endif // _WIN32 return count > 0; } -int socket::last_error() { +int socket::last_error() noexcept { #ifndef _WIN32 return errno; #else return WSAGetLastError(); #endif } -bool socket::would_block(int error) { + +bool socket::would_block(int error) noexcept { #ifndef _WIN32 return error == EAGAIN || error == EWOULDBLOCK || error == EINPROGRESS; #else @@ -241,6 +241,154 @@ bool socket::would_block(int error) { #endif } +int socket::close() { +#ifndef _WIN32 + int ret = ::close(fd_); +#else + int ret = ::closesocket(fd_); +#endif + if (ret < 0) { + perror("close() error"); + } + fd_ = -1; + return ret; +} + +int lst_socket::bind(std::string_view addr, socket_domain t) { + assertrx(!valid()); + int ret = 0; + sock_.domain(t); + if (domain() == socket_domain::tcp) { + struct addrinfo *results = nullptr; + ret = sock_.create(addr, &results); + if rx_unlikely (!ret) { + assertrx(results != nullptr); + ret = ::bind(sock_.fd(), results->ai_addr, results->ai_addrlen); + if rx_unlikely (ret != 0) { + perror("bind error"); + close(); + } + } + if (results) { + freeaddrinfo(results); + } + } else { +#ifdef _WIN32 + return print_not_supported(); +#else // _WIN32 + if (sock_.create(addr, nullptr) < 0) { + return -1; + } + + struct sockaddr_un address; + address.sun_family = AF_UNIX; + memcpy(address.sun_path, addr.data(), addr.size()); + address.sun_path[addr.size()] = 0; + + unPath_ = addr; + unLock_ = unPath_ + ".LOCK"; + assertrx(lockFd_ < 0); + lockFd_ = ::open(unLock_.c_str(), O_WRONLY | O_CREAT | O_APPEND, S_IRWXU); // open(unLock_.c_str(), O_RDONLY | O_CREAT, 0600); + if (lockFd_ < 0) { + perror("open(lock) error"); + close(); + return -1; + } + + struct flock lock; + memset(&lock, 0, sizeof(struct flock)); + lock.l_type = F_WRLCK; + lock.l_start = 0; + lock.l_whence = SEEK_SET; + lock.l_len = 0; + lock.l_pid = getpid(); + + if rx_unlikely (fcntl(lockFd_, F_SETLK, &lock) < 0) { + fprintf(stderr, "Unable to get LOCK for %s\n", unLock_.c_str()); + perror("fcntl(F_SETLK) error"); + close(); + return -1; + } + unlink(unPath_.c_str()); + + if rx_unlikely (::bind(sock_.fd(), reinterpret_cast(&address), sizeof(address)) < 0) { + perror("bind() error"); + close(); + return -1; + } +#endif // _WIN32 + } + return ret; +} + +int lst_socket::listen(int backlog) noexcept { + if (domain() == socket_domain::tcp) { +#ifdef __linux__ + int enable = 1; + + if (setsockopt(sock_.fd(), SOL_TCP, TCP_DEFER_ACCEPT, &enable, sizeof(enable)) < 0) { + perror("setsockopt(TCP_DEFER_ACCEPT) failed"); + } + if (setsockopt(sock_.fd(), SOL_TCP, TCP_QUICKACK, &enable, sizeof(enable)) < 0) { + perror("setsockopt(TCP_QUICKACK) failed"); + } +#endif + } +#ifdef _WIN32 + else { + return print_not_supported(); + } +#endif // _WIN32 + return ::listen(sock_.fd(), backlog); +} + +int lst_socket::close() { + int ret = sock_.close(); + + if (domain() == socket_domain::unx) { + if (!unPath_.empty() && (unlink(unPath_.c_str()) != 0)) { + perror("unix socket unlink error"); + } + if (::close(lockFd_) != 0) { + perror("close(lock) error"); + ret = -1; + } else { + if (::unlink(unLock_.c_str()) != 0) { + perror("lock file unlink error"); + ret = -1; + } + } + unPath_.clear(); + unLock_.clear(); + lockFd_ = -1; + } + return ret; +} + +socket lst_socket::accept() { + struct sockaddr client_addr; + memset(&client_addr, 0, sizeof(client_addr)); + socklen_t client_len = sizeof(client_addr); + +#ifdef __linux__ + socket 
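// --- Illustrative sketch (not part of the patch): before binding the unix-domain listening socket,
// lst_socket::bind() above takes an advisory write-lock on "<path>.LOCK" and only then unlinks a
// possibly stale socket file, so a second server instance cannot delete a socket that is still in
// use. A minimal standalone version of that lock-then-unlink pattern:
#include <fcntl.h>
#include <sys/stat.h>
#include <unistd.h>
#include <cstring>
#include <string>

// Returns the lock fd on success (keep it open for the lifetime of the listener), or -1 on failure.
static int lockAndRemoveStaleSocket(const std::string &sockPath) {
	const std::string lockPath = sockPath + ".LOCK";
	int lockFd = ::open(lockPath.c_str(), O_WRONLY | O_CREAT | O_APPEND, S_IRWXU);
	if (lockFd < 0) return -1;

	struct flock lock;
	std::memset(&lock, 0, sizeof(lock));
	lock.l_type = F_WRLCK;     // exclusive write lock on the whole file (l_start = 0, l_len = 0)
	lock.l_whence = SEEK_SET;
	if (::fcntl(lockFd, F_SETLK, &lock) < 0) {
		::close(lockFd);  // another live listener holds the lock: do NOT unlink its socket
		return -1;
	}
	::unlink(sockPath.c_str());  // safe: the previous owner is gone, the socket file is stale
	return lockFd;
}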
client(::accept4(sock_.fd(), &client_addr, &client_len, SOCK_NONBLOCK), domain()); + +#else // __linux__ + socket client(::accept(sock_.fd(), &client_addr, &client_len), domain()); + if (client.valid()) { + if rx_unlikely (client.set_nonblock() < 0) { + perror("client.set_nonblock() error"); + } + } +#endif // __linux__ + if rx_likely (client.valid()) { + if rx_unlikely (client.set_nodelay() != 0) { + perror("client.set_nodelay() error"); + } + } + return client; +} + #ifdef _WIN32 class __windows_ev_init { public: diff --git a/cpp_src/net/socket.h b/cpp_src/net/socket.h index 9557dd26c..0cd8f1b5f 100644 --- a/cpp_src/net/socket.h +++ b/cpp_src/net/socket.h @@ -1,6 +1,6 @@ #pragma once -#include +#include #include "estl/chunk_buf.h" #include "tools/ssize_t.h" @@ -8,35 +8,108 @@ struct addrinfo; namespace reindexer { namespace net { +enum class socket_domain : bool { tcp, unx }; +class lst_socket; + class socket { public: - socket(const socket &other) = default; - socket &operator=(const socket &other) = default; - socket(int fd = -1) : fd_(fd) {} + socket() = default; + socket(const socket &&other) = delete; + socket(socket &&other) noexcept : fd_(other.fd_), type_(other.type_) { other.fd_ = -1; } + socket &operator=(const socket &other) = delete; + socket &operator=(socket &&other) noexcept { + if rx_likely (this != &other) { + if (valid()) { + close(); + } + fd_ = other.fd_; + type_ = other.type_; + other.fd_ = -1; + } + return *this; + } + ~socket() { + if (valid()) { + close(); + } + } - int bind(std::string_view addr); - int connect(std::string_view addr) noexcept; - socket accept(); - int listen(int backlog); + [[nodiscard]] int connect(std::string_view addr, socket_domain t); ssize_t recv(span buf); ssize_t send(const span buf); ssize_t send(span chunks); int close(); std::string addr() const; - int set_nonblock(); - int set_nodelay(); + [[nodiscard]] int set_nonblock(); + [[nodiscard]] int set_nodelay() noexcept; int fd() const noexcept { return fd_; } bool valid() const noexcept { return fd_ >= 0; } bool has_pending_data() const noexcept; + socket_domain domain() const noexcept { return type_; } - static int last_error(); - static bool would_block(int error); + static int last_error() noexcept; + static bool would_block(int error) noexcept; -protected: +private: + friend class lst_socket; + + socket(int fd, socket_domain type) noexcept : fd_(fd), type_(type) {} int create(std::string_view addr, struct addrinfo **pres); + void domain(socket_domain t) noexcept { type_ = t; } + + int fd_ = -1; + socket_domain type_ = socket_domain::tcp; +}; + +class lst_socket { +public: + lst_socket() = default; + lst_socket(const lst_socket &&other) = delete; + lst_socket(lst_socket &&other) noexcept + : sock_(std::move(other.sock_)), lockFd_(other.lockFd_), unPath_(std::move(other.unPath_)), unLock_(std::move(other.unLock_)) { + other.lockFd_ = -1; + } + lst_socket &operator=(const lst_socket &other) = delete; + lst_socket &operator=(lst_socket &&other) noexcept { + if rx_likely (this != &other) { + if (valid()) { + close(); + } + sock_ = std::move(other.sock_); + lockFd_ = other.lockFd_; + unPath_ = std::move(other.unPath_); + unLock_ = std::move(other.unLock_); + other.lockFd_ = -1; + } + return *this; + } + ~lst_socket() { + if (valid()) { + close(); + } + } - int fd_; + int bind(std::string_view addr, socket_domain t); + socket accept(); + [[nodiscard]] int listen(int backlog) noexcept; + int close(); + + int fd() const noexcept { return sock_.fd(); } + bool valid() const noexcept { return 
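// --- Illustrative sketch (not part of the patch): `socket` is now a move-only RAII handle -- copying
// is deleted, moving transfers the fd, and the destructor closes a still-valid descriptor, so accept
// and Restart() paths cannot double-close. Rough usage, assuming the declarations from net/socket.h:
static void acceptOne(reindexer::net::lst_socket &listenSock) {
	reindexer::net::socket client = listenSock.accept();  // returned by value (moved out)
	if (!client.valid()) return;
	reindexer::net::socket owner = std::move(client);  // ownership transferred; client.fd() is -1 now
	// ... hand `owner` to a connection object; its destructor closes the fd exactly once
}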
sock_.valid(); } + bool owns_lock() const noexcept { return lockFd_ >= 0; } + + static int last_error() noexcept { return socket::last_error(); } + static bool would_block(int error) noexcept { return socket::would_block(error); } + +private: + socket_domain domain() const noexcept { return sock_.domain(); } + + socket sock_; + int lockFd_ = -1; + std::string unPath_; + std::string unLock_; }; + } // namespace net } // namespace reindexer diff --git a/cpp_src/readme.md b/cpp_src/readme.md index c7229906d..05c6e0cfb 100644 --- a/cpp_src/readme.md +++ b/cpp_src/readme.md @@ -166,7 +166,7 @@ http.Handle("/metrics", promhttp.Handler()) All of the metricts will be exported into `DefaultRegistry`. Check [this](https://github.com/prometheus/client_golang/blob/main/prometheus/promauto/auto.go#L57-L85) for basic prometheus usage example. -Both server-side and client-side metrics contain 'latency', however, client-side latency will also count all the time consumed by the binding's queue, network communication (for cproto) and deseriallization. +Both server-side and client-side metrics contain 'latency', however, client-side latency will also count all the time consumed by the binding's queue, network communication (for cproto/ucproto) and deseriallization. So client-side latency may be more rellevant for user's applications the server-side latency. ## Maintenance diff --git a/cpp_src/replicator/replicator.cc b/cpp_src/replicator/replicator.cc index 4717903e3..4ba66b2ce 100644 --- a/cpp_src/replicator/replicator.cc +++ b/cpp_src/replicator/replicator.cc @@ -15,8 +15,6 @@ namespace reindexer { using namespace net; using namespace std::string_view_literals; -static constexpr size_t kTmpNsPostfixLen = 20; - Replicator::Replicator(ReindexerImpl *slave) : slave_(slave), resyncUpdatesLostFlag_(false), @@ -515,9 +513,9 @@ Error Replicator::syncNamespaceByWAL(const NamespaceDef &nsDef) { case errQrUIDMissmatch: case errSystem: case errAssert: - default: - return err; + break; } + return err; } // Forced namespace sync @@ -535,7 +533,7 @@ Error Replicator::syncNamespaceForced(const NamespaceDef &ns, std::string_view r tmpNsDef.storage = StorageOpts().SlaveMode(); } - tmpNsDef.name = "@" + ns.name + "_tmp_" + randStringAlph(kTmpNsPostfixLen); + tmpNsDef.name = createTmpNamespaceName(ns.name); auto dropTmpNs = [this, &tmpNsDef] { auto tmpNs = slave_->getNamespaceNoThrow(tmpNsDef.name, dummyCtx_); if (tmpNs) { @@ -833,7 +831,7 @@ Error Replicator::applyWALRecord(LSNPair LSNs, std::string_view nsName, Namespac break; } case WalSetSchema: - slaveNs->SetSchema(rec.data, dummyCtx_); + slaveNs->SetSchema(rec.data, rdxContext); stat.schemasSet++; break; case WalEmpty: @@ -1062,13 +1060,13 @@ bool Replicator::canApplyUpdate(LSNPair LSNs, std::string_view nsName, const WAL if (!isSyncEnabled(nsName)) return false; if (terminate_.load(std::memory_order_acquire)) { - logPrintf(LogTrace, "[repl:%s]:%d Skipping update due to replicator shutdown is in progress upstreamLSN %s", nsName, - config_.serverId, LSNs.upstreamLSN_); + logPrintf(LogTrace, "[repl:%s]:%d Skipping update due to replicator shutdown is in progress upstreamLSN %s (%d)", nsName, + config_.serverId, LSNs.upstreamLSN_, int(wrec.type)); return false; } if (state_.load(std::memory_order_acquire) == StateIdle && !resyncUpdatesLostFlag_) { - logPrintf(LogTrace, "[repl:%s]:%d apply update upstreamLSN %s", nsName, config_.serverId, LSNs.upstreamLSN_); + logPrintf(LogTrace, "[repl:%s]:%d apply update upstreamLSN %s (%d)", nsName, config_.serverId, LSNs.upstreamLSN_, 
int(wrec.type)); return true; } @@ -1076,28 +1074,32 @@ bool Replicator::canApplyUpdate(LSNPair LSNs, std::string_view nsName, const WAL auto state = state_.load(std::memory_order_acquire); if (state == StateIdle) { if (!resyncUpdatesLostFlag_) { - logPrintf(LogTrace, "[repl:%s]:%d apply update upstreamLSN %s", nsName, config_.serverId, LSNs.upstreamLSN_); + logPrintf(LogTrace, "[repl:%s]:%d apply update upstreamLSN %s (%d)", nsName, config_.serverId, LSNs.upstreamLSN_, + int(wrec.type)); return true; } else { auto it = pendedUpdates_.find(nsName); if (it != pendedUpdates_.end()) { if (it->second.UpdatesLost) { - logPrintf(LogTrace, "[repl:%s]:%d NOT APPLY update lost %s", nsName, config_.serverId, LSNs.upstreamLSN_); + logPrintf(LogTrace, "[repl:%s]:%d NOT APPLY update lost %s (%d)", nsName, config_.serverId, LSNs.upstreamLSN_, + int(wrec.type)); return false; } else { - logPrintf(LogTrace, "[repl:%s]:%d apply update pendeded not empty %s", nsName, config_.serverId, LSNs.upstreamLSN_); + logPrintf(LogTrace, "[repl:%s]:%d apply update pendeded not empty %s (%d)", nsName, config_.serverId, LSNs.upstreamLSN_, + int(wrec.type)); return true; } } else { - logPrintf(LogTrace, "[repl:%s]:%d apply update pendeded empty %s", nsName, config_.serverId, LSNs.upstreamLSN_); + logPrintf(LogTrace, "[repl:%s]:%d apply update pendeded empty %s (%d)", nsName, config_.serverId, LSNs.upstreamLSN_, + int(wrec.type)); return true; } } } bool terminate = terminate_.load(std::memory_order_acquire); if (state == StateInit || terminate) { - logPrintf(LogTrace, "[repl:%s]:%d Skipping update due to replicator %s is in progress upstreamLSN %s", nsName, config_.serverId, - terminate ? "shutdown" : "startup", LSNs.upstreamLSN_); + logPrintf(LogTrace, "[repl:%s]:%d Skipping update due to replicator %s is in progress upstreamLSN %s (%d)", nsName, + config_.serverId, terminate ? 
"shutdown" : "startup", LSNs.upstreamLSN_, int(wrec.type)); return false; } @@ -1107,11 +1109,12 @@ bool Replicator::canApplyUpdate(LSNPair LSNs, std::string_view nsName, const WAL if (std::string_view(currentSyncNs_) != nsName) { if (syncedNamespaces_.find(nsName) != syncedNamespaces_.end()) { - logPrintf(LogTrace, "[repl:%s]:%d applying update for synced ns %s", nsName, config_.serverId, LSNs.upstreamLSN_); + logPrintf(LogTrace, "[repl:%s]:%d applying update for synced ns %s (%d)", nsName, config_.serverId, LSNs.upstreamLSN_, + int(wrec.type)); return true; } else { - logPrintf(LogTrace, "[repl:%s]:%d Skipping update - namespace was not synced yet, upstreamLSN %s", nsName, config_.serverId, - LSNs.upstreamLSN_); + logPrintf(LogTrace, "[repl:%s]:%d Skipping update - namespace was not synced yet, upstreamLSN %s (%d)", nsName, + config_.serverId, LSNs.upstreamLSN_, int(wrec.type)); if (wrec.type == WalNamespaceAdd || wrec.type == WalNamespaceDrop || wrec.type == WalNamespaceRename) { logPrintf(LogInfo, "[repl:%s]:%d Scheduling resync due to concurrent ns add/delete: %d", nsName, config_.serverId, int(wrec.type)); diff --git a/cpp_src/replicator/walrecord.cc b/cpp_src/replicator/walrecord.cc index a9daa40f4..436a7044f 100644 --- a/cpp_src/replicator/walrecord.cc +++ b/cpp_src/replicator/walrecord.cc @@ -1,7 +1,6 @@ #include "walrecord.h" #include "core/cjson/baseencoder.h" -#include "core/transactionimpl.h" #include "tools/logger.h" #include "tools/serializer.h" @@ -148,7 +147,7 @@ static std::string_view wrecType2Str(WALRecType t) { } } -WrSerializer &WALRecord::Dump(WrSerializer &ser, const std::function& cjsonViewer) const { +WrSerializer &WALRecord::Dump(WrSerializer &ser, const std::function &cjsonViewer) const { ser << wrecType2Str(type); if (inTransaction) ser << " InTransaction"; switch (type) { @@ -182,7 +181,7 @@ WrSerializer &WALRecord::Dump(WrSerializer &ser, const std::function& cjsonViewer) const { +void WALRecord::GetJSON(JsonBuilder &jb, const std::function &cjsonViewer) const { jb.Put("type", wrecType2Str(type)); jb.Put("in_transaction", inTransaction); diff --git a/cpp_src/replicator/walselecter.cc b/cpp_src/replicator/walselecter.cc index d853a90d3..23da38c5c 100644 --- a/cpp_src/replicator/walselecter.cc +++ b/cpp_src/replicator/walselecter.cc @@ -1,4 +1,3 @@ - #include "walselecter.h" #include "core/cjson/jsonbuilder.h" #include "core/namespace/namespaceimpl.h" @@ -15,8 +14,8 @@ WALSelecter::WALSelecter(const NamespaceImpl *ns) : ns_(ns) {} void WALSelecter::operator()(QueryResults &result, SelectCtx ¶ms) { using namespace std::string_view_literals; const Query &q = params.query; - int count = q.count; - int start = q.start; + int count = q.Limit(); + int start = q.Offset(); result.totalCount = 0; if (!q.IsWALQuery()) { @@ -31,20 +30,20 @@ void WALSelecter::operator()(QueryResults &result, SelectCtx ¶ms) { q.entries.InvokeAppropriate( i, [&lsnIdx, &versionIdx, i](const QueryEntry &qe) { - if ("#lsn"sv == qe.index) { + if ("#lsn"sv == qe.FieldName()) { lsnIdx = i; - } else if ("#slave_version"sv == qe.index) { + } else if ("#slave_version"sv == qe.FieldName()) { versionIdx = i; } else { - throw Error(errLogic, "Unexpected index in WAL select query: %s", qe.index); + throw Error(errLogic, "Unexpected index in WAL select query: %s", qe.FieldName()); } }, [&q](const auto &) { throw Error(errLogic, "Unexpected WAL select query: %s", q.GetSQL()); }); } - auto slaveVersion = versionIdx < 0 ? 
SemVersion() : SemVersion(q.entries.Get(versionIdx).values[0].As()); + auto slaveVersion = versionIdx < 0 ? SemVersion() : SemVersion(q.entries.Get(versionIdx).Values()[0].As()); auto &lsnEntry = q.entries.Get(lsnIdx); - if (lsnEntry.values.size() == 1 && lsnEntry.condition == CondGt) { - lsn_t fromLSN = lsn_t(std::min(lsnEntry.values[0].As(), std::numeric_limits::max() - 1)); + if (lsnEntry.Values().size() == 1 && lsnEntry.Condition() == CondGt) { + lsn_t fromLSN = lsn_t(std::min(lsnEntry.Values()[0].As(), std::numeric_limits::max() - 1)); if (fromLSN.Server() != ns_->serverId_) throw Error(errOutdatedWAL, "Query to WAL with incorrect LSN %ld, LSN counter %ld", int64_t(fromLSN), ns_->wal_.LSNCounter()); if (ns_->wal_.LSNCounter() != (fromLSN.Counter() + 1) && ns_->wal_.is_outdated(fromLSN.Counter() + 1) && count) @@ -73,7 +72,7 @@ void WALSelecter::operator()(QueryResults &result, SelectCtx ¶ms) { if (versionIdx < 0) { break; } - if (q.entries.Get(versionIdx).condition != CondEq || slaveVersion < kMinUnknownReplSupportRxVersion) { + if (q.entries.Get(versionIdx).Condition() != CondEq || slaveVersion < kMinUnknownReplSupportRxVersion) { break; } // fall-through @@ -110,7 +109,7 @@ void WALSelecter::operator()(QueryResults &result, SelectCtx ¶ms) { std::abort(); } } - } else if (lsnEntry.condition == CondAny) { + } else if (lsnEntry.Condition() == CondAny) { if (start == 0 && !(slaveVersion < kMinUnknownReplSupportRxVersion)) { auto addSpRecord = [&result](const WALRecord &wrec) { PackedWALRecord wr; diff --git a/cpp_src/server/clientsstats.cc b/cpp_src/server/clientsstats.cc index d6b387075..f8b84e863 100644 --- a/cpp_src/server/clientsstats.cc +++ b/cpp_src/server/clientsstats.cc @@ -29,6 +29,7 @@ void ClientsStats::GetClientInfo(std::vector& datas) { reindexer::deepCopy(d.dbName, c.second.dbName); reindexer::deepCopy(d.ip, c.second.ip); reindexer::deepCopy(d.userName, c.second.userName); + d.protocol = c.second.protocol; reindexer::deepCopy(d.userRights, c.second.userRights); reindexer::deepCopy(d.clientVersion, c.second.clientVersion); reindexer::deepCopy(d.appName, c.second.appName); diff --git a/cpp_src/server/clientsstats.h b/cpp_src/server/clientsstats.h index a12cc1478..cbd865f0f 100644 --- a/cpp_src/server/clientsstats.h +++ b/cpp_src/server/clientsstats.h @@ -6,7 +6,7 @@ namespace reindexer_server { -class ClientsStats : public reindexer::IClientsStats { +class ClientsStats final : public reindexer::IClientsStats { public: void GetClientInfo(std::vector& datas) override final; void AddConnection(int64_t connectionId, reindexer::ClientConnectionStat&& conn) override final; diff --git a/cpp_src/server/config.cc b/cpp_src/server/config.cc index 08523c4c5..53c42bd6f 100644 --- a/cpp_src/server/config.cc +++ b/cpp_src/server/config.cc @@ -13,8 +13,10 @@ void ServerConfig::Reset() { StorageEngine = "leveldb"; HTTPAddr = "0.0.0.0:9088"; RPCAddr = "0.0.0.0:6534"; + RPCUnixAddr = ""; GRPCAddr = "0.0.0.0:16534"; RPCThreadingMode = kSharedThreading; + RPCUnixThreadingMode = kSharedThreading; HttpThreadingMode = kSharedThreading; LogLevel = "info"; ServerLog = "stdout"; @@ -43,6 +45,8 @@ void ServerConfig::Reset() { EnableConnectionsStats = true; TxIdleTimeout = std::chrono::seconds(600); RPCQrIdleTimeout = std::chrono::seconds(600); + HttpReadTimeout = std::chrono::seconds(0); + HttpWriteTimeout = std::chrono::seconds(0); MaxUpdatesSize = 1024 * 1024 * 1024; EnableGRPC = false; MaxHttpReqSize = 2 * 1024 * 1024; @@ -52,7 +56,6 @@ void ServerConfig::Reset() { const std::string 
ServerConfig::kDedicatedThreading = "dedicated"; const std::string ServerConfig::kSharedThreading = "shared"; -const std::string ServerConfig::kPoolThreading = "pool"; reindexer::Error ServerConfig::ParseYaml(const std::string &yaml) { Error err; @@ -108,10 +111,19 @@ Error ServerConfig::ParseCmd(int argc, char *argv[]) { args::Group netGroup(parser, "Network options"); args::ValueFlag httpAddrF(netGroup, "PORT", "http listen host:port", {'p', "httpaddr"}, HTTPAddr, args::Options::Single); args::ValueFlag rpcAddrF(netGroup, "RPORT", "RPC listen host:port", {'r', "rpcaddr"}, RPCAddr, args::Options::Single); - args::ValueFlag rpcThreadingModeF(netGroup, "RTHREADING", "RPC connections threading mode: shared or dedicated", - {'X', "rpc-threading"}, RPCThreadingMode, args::Options::Single); +#ifndef _WIN32 + args::ValueFlag rpcUnixAddrF(netGroup, "URPORT", "RPC listen path (unix domain socket)", {"urpcaddr"}, RPCUnixAddr, + args::Options::Single); +#endif // !_WIN32 args::ValueFlag httpThreadingModeF(netGroup, "HTHREADING", "HTTP connections threading mode: shared or dedicated", {"http-threading"}, HttpThreadingMode, args::Options::Single); + args::ValueFlag rpcThreadingModeF(netGroup, "RTHREADING", "RPC connections threading mode: shared or dedicated", + {'X', "rpc-threading"}, RPCThreadingMode, args::Options::Single); +#ifndef _WIN32 + args::ValueFlag rpcUnixThreadingModeF(netGroup, "URTHREADING", + "RPC connections threading mode: shared or dedicated (unix domain socket)", + {"urpc-threading"}, RPCUnixThreadingMode, args::Options::Single); +#endif // _WIN32 args::ValueFlag MaxHttpReqSizeF( netGroup, "", "Max HTTP request size in bytes. Default value is 2 MB. 0 is 'unlimited', hovewer, stream mode is not supported", {"max-http-req"}, MaxHttpReqSize, args::Options::Single); @@ -121,6 +133,11 @@ Error ServerConfig::ParseCmd(int argc, char *argv[]) { #endif args::ValueFlag webRootF(netGroup, "PATH", "web root. This path if set overrides linked-in resources", {'w', "webroot"}, WebRoot, args::Options::Single); + args::ValueFlag httpReadTimeoutF(netGroup, "", "timeout (s) for HTTP read operations (i.e. selects, get meta and others)", + {"http-read-timeout"}, HttpReadTimeout.count(), args::Options::Single); + args::ValueFlag httpWriteTimeoutF(netGroup, "", + "timeout (s) for HTTP write operations (i.e. 
update, delete, put meta, add index and others)", + {"http-write-timeout"}, HttpWriteTimeout.count(), args::Options::Single); args::ValueFlag maxUpdatesSizeF(netGroup, "", "Maximum cached updates size", {"updatessize"}, MaxUpdatesSize, args::Options::Single); args::Flag pprofF(netGroup, "", "Enable pprof http handler", {'f', "pprof"}); @@ -203,6 +220,8 @@ Error ServerConfig::ParseCmd(int argc, char *argv[]) { if (webRootF) WebRoot = args::get(webRootF); if (MaxHttpReqSizeF) MaxHttpReqSize = args::get(MaxHttpReqSizeF); #ifndef _WIN32 + if (rpcUnixAddrF) RPCUnixAddr = args::get(rpcUnixAddrF); + if (rpcUnixThreadingModeF) RPCUnixThreadingMode = args::get(rpcUnixThreadingModeF); if (userF) UserName = args::get(userF); if (daemonizeF) Daemonize = args::get(daemonizeF); if (daemonPidFileF) DaemonPidFile = args::get(daemonPidFileF); @@ -231,6 +250,8 @@ Error ServerConfig::ParseCmd(int argc, char *argv[]) { if (prometheusF) EnablePrometheus = args::get(prometheusF); if (prometheusPeriodF) PrometheusCollectPeriod = std::chrono::milliseconds(args::get(prometheusPeriodF)); if (clientsConnectionsStatF) EnableConnectionsStats = args::get(clientsConnectionsStatF); + if (httpReadTimeoutF) HttpReadTimeout = std::chrono::seconds(args::get(httpReadTimeoutF)); + if (httpWriteTimeoutF) HttpWriteTimeout = std::chrono::seconds(args::get(httpWriteTimeoutF)); if (logAllocsF) DebugAllocs = args::get(logAllocsF); if (txIdleTimeoutF) TxIdleTimeout = std::chrono::seconds(args::get(txIdleTimeoutF)); if (rpcQrIdleTimeoutF) RPCQrIdleTimeout = std::chrono::seconds(args::get(rpcQrIdleTimeoutF)); @@ -261,12 +282,16 @@ reindexer::Error ServerConfig::fromYaml(YAML::Node &root) { EnableGRPC = root["net"]["grpc"].as(EnableGRPC); GRPCAddr = root["net"]["grpcaddr"].as(GRPCAddr); TxIdleTimeout = std::chrono::seconds(root["net"]["tx_idle_timeout"].as(TxIdleTimeout.count())); + HttpReadTimeout = std::chrono::seconds(root["net"]["http_read_timeout"].as(HttpReadTimeout.count())); + HttpWriteTimeout = std::chrono::seconds(root["net"]["http_write_timeout"].as(HttpWriteTimeout.count())); RPCQrIdleTimeout = std::chrono::seconds(root["net"]["rpc_qr_idle_timeout"].as(RPCQrIdleTimeout.count())); MaxHttpReqSize = root["net"]["max_http_body_size"].as(MaxHttpReqSize); EnablePrometheus = root["metrics"]["prometheus"].as(EnablePrometheus); PrometheusCollectPeriod = std::chrono::milliseconds(root["metrics"]["collect_period"].as(PrometheusCollectPeriod.count())); EnableConnectionsStats = root["metrics"]["clientsstats"].as(EnableConnectionsStats); #ifndef _WIN32 + RPCUnixAddr = root["net"]["urpcaddr"].as(RPCUnixAddr); + RPCUnixThreadingMode = root["net"]["urpc_threading"].as(RPCUnixThreadingMode); UserName = root["system"]["user"].as(UserName); Daemonize = root["system"]["daemonize"].as(Daemonize); DaemonPidFile = root["system"]["pidfile"].as(DaemonPidFile); diff --git a/cpp_src/server/config.h b/cpp_src/server/config.h index 147b10ccc..70c100a32 100644 --- a/cpp_src/server/config.h +++ b/cpp_src/server/config.h @@ -36,7 +36,9 @@ struct ServerConfig { std::string StorageEngine; std::string HTTPAddr; std::string RPCAddr; + std::string RPCUnixAddr; std::string RPCThreadingMode; + std::string RPCUnixThreadingMode; std::string HttpThreadingMode; std::string LogLevel; std::string ServerLog; @@ -63,6 +65,8 @@ struct ServerConfig { std::chrono::milliseconds PrometheusCollectPeriod; bool DebugAllocs; std::chrono::seconds TxIdleTimeout; + std::chrono::seconds HttpReadTimeout; + std::chrono::seconds HttpWriteTimeout; size_t MaxUpdatesSize; bool EnableGRPC; 
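// --- Illustrative sketch (not part of the patch): the new options are read from the `net` section of
// the server YAML config with the defaults set in Reset() above (empty unix-socket address, "shared"
// threading, 0 == timeout disabled). A minimal yaml-cpp read of just those keys:
#include <yaml-cpp/yaml.h>
#include <chrono>
#include <string>

struct NetOptions {
	std::string rpcUnixAddr;       // "urpcaddr": path of the unix-domain RPC socket ("" == disabled)
	std::string rpcUnixThreading;  // "urpc_threading": "shared" or "dedicated"
	std::chrono::seconds httpReadTimeout{0};
	std::chrono::seconds httpWriteTimeout{0};
};

static NetOptions readNetOptions(const std::string &yaml) {
	NetOptions opts;
	YAML::Node root = YAML::Load(yaml);
	opts.rpcUnixAddr = root["net"]["urpcaddr"].as<std::string>("");
	opts.rpcUnixThreading = root["net"]["urpc_threading"].as<std::string>("shared");
	opts.httpReadTimeout = std::chrono::seconds(root["net"]["http_read_timeout"].as<int>(0));
	opts.httpWriteTimeout = std::chrono::seconds(root["net"]["http_write_timeout"].as<int>(0));
	return opts;
}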
std::string GRPCAddr; @@ -73,7 +77,6 @@ struct ServerConfig { static const std::string kDedicatedThreading; static const std::string kSharedThreading; - static const std::string kPoolThreading; protected: Error fromYaml(YAML::Node& root); diff --git a/cpp_src/server/contrib/server.md b/cpp_src/server/contrib/server.md index 7b092c81c..42753b29c 100644 --- a/cpp_src/server/contrib/server.md +++ b/cpp_src/server/contrib/server.md @@ -1584,7 +1584,7 @@ This operation pareses SQL query, and suggests autocompletion variants |Type|Name|Description|Schema| |---|---|---|---| |**Path**|**database**
*required*|Database name|string| -|**Query**|**line**
*optional*|Cursor line for suggest|integer| +|**Query**|**line**
*required*|Cursor line for suggest|integer| |**Query**|**pos**
*required*|Cursor position for suggest|integer| |**Query**|**q**
*required*|SQL query|string| diff --git a/cpp_src/server/contrib/server.yml b/cpp_src/server/contrib/server.yml index d2906ac08..15bd69115 100644 --- a/cpp_src/server/contrib/server.yml +++ b/cpp_src/server/contrib/server.yml @@ -54,6 +54,8 @@ paths: $ref: "#/responses/Forbidden" 404: $ref: "#/responses/NotFound" + 408: + $ref: "#/responses/RequestTimeout" 500: $ref: "#/responses/UnexpectedError" @@ -83,6 +85,8 @@ paths: $ref: "#/responses/Forbidden" 404: $ref: "#/responses/NotFound" + 408: + $ref: "#/responses/RequestTimeout" 500: $ref: "#/responses/UnexpectedError" @@ -111,6 +115,8 @@ paths: $ref: "#/responses/Forbidden" 404: $ref: "#/responses/NotFound" + 408: + $ref: "#/responses/RequestTimeout" 500: $ref: "#/responses/UnexpectedError" @@ -144,6 +150,8 @@ paths: $ref: "#/responses/Forbidden" 404: $ref: "#/responses/NotFound" + 408: + $ref: "#/responses/RequestTimeout" 500: $ref: "#/responses/UnexpectedError" get: @@ -178,6 +186,8 @@ paths: $ref: "#/responses/Forbidden" 404: $ref: "#/responses/NotFound" + 408: + $ref: "#/responses/RequestTimeout" 500: $ref: "#/responses/UnexpectedError" @@ -211,6 +221,8 @@ paths: $ref: "#/responses/Forbidden" 404: $ref: "#/responses/NotFound" + 408: + $ref: "#/responses/RequestTimeout" 500: $ref: "#/responses/UnexpectedError" delete: @@ -242,6 +254,8 @@ paths: $ref: "#/responses/Forbidden" 404: $ref: "#/responses/NotFound" + 408: + $ref: "#/responses/RequestTimeout" 500: $ref: "#/responses/UnexpectedError" @@ -273,6 +287,8 @@ paths: $ref: "#/responses/Forbidden" 404: $ref: "#/responses/NotFound" + 408: + $ref: "#/responses/RequestTimeout" 500: $ref: "#/responses/UnexpectedError" @@ -310,6 +326,8 @@ paths: $ref: "#/responses/Forbidden" 404: $ref: "#/responses/NotFound" + 408: + $ref: "#/responses/RequestTimeout" 500: $ref: "#/responses/UnexpectedError" @@ -369,6 +387,8 @@ paths: $ref: "#/responses/Forbidden" 404: $ref: "#/responses/NotFound" + 408: + $ref: "#/responses/RequestTimeout" 500: $ref: "#/responses/UnexpectedError" @@ -407,6 +427,8 @@ paths: $ref: "#/responses/Forbidden" 404: $ref: "#/responses/NotFound" + 408: + $ref: "#/responses/RequestTimeout" 500: $ref: "#/responses/UnexpectedError" @@ -446,6 +468,8 @@ paths: $ref: "#/responses/Forbidden" 404: $ref: "#/responses/NotFound" + 408: + $ref: "#/responses/RequestTimeout" 500: $ref: "#/responses/UnexpectedError" @@ -498,6 +522,8 @@ paths: $ref: "#/responses/Forbidden" 404: $ref: "#/responses/NotFound" + 408: + $ref: "#/responses/RequestTimeout" 500: $ref: "#/responses/UnexpectedError" post: @@ -557,6 +583,8 @@ paths: $ref: "#/responses/Forbidden" 404: $ref: "#/responses/NotFound" + 408: + $ref: "#/responses/RequestTimeout" 500: $ref: "#/responses/UnexpectedError" put: @@ -616,6 +644,8 @@ paths: $ref: "#/responses/Forbidden" 404: $ref: "#/responses/NotFound" + 408: + $ref: "#/responses/RequestTimeout" 500: $ref: "#/responses/UnexpectedError" @@ -676,6 +706,8 @@ paths: $ref: "#/responses/Forbidden" 404: $ref: "#/responses/NotFound" + 408: + $ref: "#/responses/RequestTimeout" 500: $ref: "#/responses/UnexpectedError" get: @@ -744,6 +776,8 @@ paths: $ref: "#/responses/Forbidden" 404: $ref: "#/responses/NotFound" + 408: + $ref: "#/responses/RequestTimeout" 500: $ref: "#/responses/UnexpectedError" @@ -777,6 +811,8 @@ paths: $ref: "#/responses/Forbidden" 404: $ref: "#/responses/NotFound" + 408: + $ref: "#/responses/RequestTimeout" 500: $ref: "#/responses/UnexpectedError" post: @@ -813,6 +849,8 @@ paths: $ref: "#/responses/Forbidden" 404: $ref: "#/responses/NotFound" + 408: + $ref: 
"#/responses/RequestTimeout" 500: $ref: "#/responses/UnexpectedError" put: @@ -849,6 +887,8 @@ paths: $ref: "#/responses/Forbidden" 404: $ref: "#/responses/NotFound" + 408: + $ref: "#/responses/RequestTimeout" 500: $ref: "#/responses/UnexpectedError" @@ -886,6 +926,8 @@ paths: $ref: "#/responses/Forbidden" 404: $ref: "#/responses/NotFound" + 408: + $ref: "#/responses/RequestTimeout" 500: $ref: "#/responses/UnexpectedError" @@ -923,6 +965,8 @@ paths: $ref: "#/responses/Forbidden" 404: $ref: "#/responses/NotFound" + 408: + $ref: "#/responses/RequestTimeout" 500: $ref: "#/responses/UnexpectedError" get: @@ -953,6 +997,8 @@ paths: $ref: "#/responses/Forbidden" 404: $ref: "#/responses/NotFound" + 408: + $ref: "#/responses/RequestTimeout" 500: $ref: "#/responses/UnexpectedError" @@ -989,6 +1035,8 @@ paths: $ref: "#/responses/Forbidden" 404: $ref: "#/responses/NotFound" + 408: + $ref: "#/responses/RequestTimeout" 500: $ref: "#/responses/UnexpectedError" @@ -1052,6 +1100,8 @@ paths: $ref: "#/responses/Forbidden" 404: $ref: "#/responses/NotFound" + 408: + $ref: "#/responses/RequestTimeout" 500: $ref: "#/responses/UnexpectedError" post: @@ -1104,6 +1154,8 @@ paths: $ref: "#/responses/Forbidden" 404: $ref: "#/responses/NotFound" + 408: + $ref: "#/responses/RequestTimeout" 500: $ref: "#/responses/UnexpectedError" put: @@ -1134,6 +1186,8 @@ paths: $ref: "#/responses/Forbidden" 404: $ref: "#/responses/NotFound" + 408: + $ref: "#/responses/RequestTimeout" 500: $ref: "#/responses/UnexpectedError" delete: @@ -1164,6 +1218,8 @@ paths: $ref: "#/responses/Forbidden" 404: $ref: "#/responses/NotFound" + 408: + $ref: "#/responses/RequestTimeout" 500: $ref: "#/responses/UnexpectedError" @@ -1204,6 +1260,8 @@ paths: $ref: "#/responses/Forbidden" 404: $ref: "#/responses/NotFound" + 408: + $ref: "#/responses/RequestTimeout" 500: $ref: "#/responses/UnexpectedError" @@ -1233,6 +1291,8 @@ paths: $ref: "#/responses/Forbidden" 404: $ref: "#/responses/NotFound" + 408: + $ref: "#/responses/RequestTimeout" 500: $ref: "#/responses/UnexpectedError" @@ -1262,6 +1322,8 @@ paths: $ref: "#/responses/Forbidden" 404: $ref: "#/responses/NotFound" + 408: + $ref: "#/responses/RequestTimeout" 500: $ref: "#/responses/UnexpectedError" @@ -1313,6 +1375,8 @@ paths: $ref: "#/responses/Forbidden" 404: $ref: "#/responses/NotFound" + 408: + $ref: "#/responses/RequestTimeout" 500: $ref: "#/responses/UnexpectedError" post: @@ -1371,6 +1435,8 @@ paths: $ref: "#/responses/Forbidden" 404: $ref: "#/responses/NotFound" + 408: + $ref: "#/responses/RequestTimeout" 500: $ref: "#/responses/UnexpectedError" put: @@ -1429,6 +1495,8 @@ paths: $ref: "#/responses/Forbidden" 404: $ref: "#/responses/NotFound" + 408: + $ref: "#/responses/RequestTimeout" 500: $ref: "#/responses/UnexpectedError" patch: @@ -1487,6 +1555,8 @@ paths: $ref: "#/responses/Forbidden" 404: $ref: "#/responses/NotFound" + 408: + $ref: "#/responses/RequestTimeout" 500: $ref: "#/responses/UnexpectedError" @@ -1538,6 +1608,8 @@ paths: $ref: "#/responses/Forbidden" 404: $ref: "#/responses/NotFound" + 408: + $ref: "#/responses/RequestTimeout" 500: $ref: "#/responses/UnexpectedError" delete: @@ -1579,6 +1651,8 @@ paths: $ref: "#/responses/Forbidden" 404: $ref: "#/responses/NotFound" + 408: + $ref: "#/responses/RequestTimeout" 500: $ref: "#/responses/UnexpectedError" @@ -1610,7 +1684,7 @@ paths: in: query type: integer description: "Cursor line for suggest" - required: false + required: true responses: 200: description: "successful operation" @@ -1622,6 +1696,8 @@ paths: $ref: 
"#/responses/Forbidden" 404: $ref: "#/responses/NotFound" + 408: + $ref: "#/responses/RequestTimeout" 500: $ref: "#/responses/UnexpectedError" @@ -1676,6 +1752,8 @@ paths: $ref: "#/responses/Forbidden" 404: $ref: "#/responses/NotFound" + 408: + $ref: "#/responses/RequestTimeout" 500: $ref: "#/responses/UnexpectedError" @@ -1697,6 +1775,8 @@ paths: $ref: "#/responses/Forbidden" 404: $ref: "#/responses/NotFound" + 408: + $ref: "#/responses/RequestTimeout" 500: $ref: "#/responses/UnexpectedError" @@ -1724,6 +1804,8 @@ paths: $ref: "#/responses/Forbidden" 404: $ref: "#/responses/NotFound" + 408: + $ref: "#/responses/RequestTimeout" 500: $ref: "#/responses/UnexpectedError" @@ -1752,6 +1834,8 @@ paths: $ref: "#/responses/Forbidden" 404: $ref: "#/responses/NotFound" + 408: + $ref: "#/responses/RequestTimeout" 500: $ref: "#/responses/UnexpectedError" @@ -1779,6 +1863,8 @@ paths: $ref: "#/responses/Forbidden" 404: $ref: "#/responses/NotFound" + 408: + $ref: "#/responses/RequestTimeout" 500: $ref: "#/responses/UnexpectedError" @@ -1806,6 +1892,8 @@ paths: $ref: "#/responses/Forbidden" 404: $ref: "#/responses/NotFound" + 408: + $ref: "#/responses/RequestTimeout" 500: $ref: "#/responses/UnexpectedError" @@ -1833,6 +1921,8 @@ paths: $ref: "#/responses/Forbidden" 404: $ref: "#/responses/NotFound" + 408: + $ref: "#/responses/RequestTimeout" 500: $ref: "#/responses/UnexpectedError" @@ -1860,6 +1950,8 @@ paths: $ref: "#/responses/Forbidden" 404: $ref: "#/responses/NotFound" + 408: + $ref: "#/responses/RequestTimeout" 500: $ref: "#/responses/UnexpectedError" @@ -1895,6 +1987,8 @@ paths: $ref: "#/responses/Forbidden" 404: $ref: "#/responses/NotFound" + 408: + $ref: "#/responses/RequestTimeout" 500: $ref: "#/responses/UnexpectedError" @@ -1907,6 +2001,10 @@ responses: description: "Invalid arguments supplied" schema: $ref: "#/definitions/StatusResponse" + RequestTimeout: + description: "Context timeout" + schema: + $ref: "#/definitions/StatusResponse" Forbidden: description: "Forbidden" schema: @@ -3109,6 +3207,12 @@ definitions: field: type: string description: "Field or index name" + field_type: + type: string + enum: + - "non-indexed" + - "indexed" + description: "Shows which kind of the field was used for the filtration. 
Non-indexed fields are usually very slow for 'scan' filtration and should be avoided" items: type: integer description: "Count of scanned documents by this selector" @@ -3130,7 +3234,7 @@ definitions: explain_select: description: "One of selects in joined namespace execution explainings" $ref: "#/definitions/ExplainDef" - join_on_conditions: + on_conditions_injections: type: array description: "Describes Join ON conditions injections" items: @@ -3262,32 +3366,9 @@ definitions: properties: success: type: boolean - description: "Status of operation" response_code: type: integer - enum: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18] - description: > - Error code: - * 0 - errOK - * 1 - errParseSQL - * 2 - errQueryExec - * 3 - errParams - * 4 - errLogic - * 5 - errParseJson - * 6 - errParseDSL - * 7 - errConflict - * 8 - errParseBin - * 9 - errForbidden - * 10 - errWasRelock - * 11 - errNotValid - * 12 - errNetwork - * 13 - errNotFound - * 14 - errStateInvalidated - * 15 - errBadTransaction - * 16 - errOutdatedWAL - * 17 - errNoWAL - * 18 - errDataHashMismatch - + description: "Duplicates HTTP response code" description: type: string description: "Text description of error details" @@ -3834,6 +3915,50 @@ definitions: default: 20000 minimun: 0 description: "Enables synchronous storage flush inside write-calls, if async updates count is more than sync_storage_flush_limit. 0 - disables synchronous storage flush, in this case storage will be flushed in background thread only" + cache: + type: object + properties: + index_idset_cache_size: + type: integer + default: 134217728 + minimum: 0 + description: "Max size of the index IdSets cache in bytes (per index). Each index has its own independent cache. This cache is used in any selections to store resulting sets of internal document IDs (it does not store the documents' content itself)" + index_idset_hits_to_cache: + type: integer + default: 2 + minimum: 0 + description: "Default 'hits to cache' for index IdSets caches. This value determines how many requests are required to put results into the cache. For example, with a value of 2 the first request will be executed without caching, the second request will create a cache entry and put the results into it, and the third request will get the cached results. This value may be automatically increased if the cache is invalidated too quickly" + ft_index_cache_size: + type: integer + default: 134217728 + minimum: 0 + description: "Max size of the fulltext indexes IdSets cache in bytes (per index). Each fulltext index has its own independent cache. This cache is used in any selections to store resulting sets of internal document IDs, FT ranks and highlighted areas (it does not store the documents' content itself)" + ft_index_hits_to_cache: + type: integer + default: 2 + minimum: 0 + description: "Default 'hits to cache' for fulltext index IdSets caches. This value determines how many requests are required to put results into the cache. For example, with a value of 2 the first request will be executed without caching, the second request will create a cache entry and put the results into it, and the third request will get the cached results. This value may be automatically increased if the cache is invalidated too quickly" + joins_preselect_cache_size: + type: integer + default: 268435456 + minimum: 0 + description: "Max size of the JOINs preselect cache in bytes for each namespace. This cache will be enabled only if the 'join_cache_mode' property is not 'off'. It stores resulting IDs, serialized JOINed queries and any other 'preselect' information for the JOIN queries (when the target namespace is the right namespace of the JOIN)" + joins_preselect_hit_to_cache: + type: integer + default: 2 + minimum: 0 + description: "Default 'hits to cache' for the joins preselect cache of the current namespace. This value determines how many requests are required to put results into the cache. For example, with a value of 2 the first request will be executed without caching, the second request will create a cache entry and put the results into it, and the third request will get the cached results. This value may be automatically increased if the cache is invalidated too quickly" + query_count_cache_size: + type: integer + default: 134217728 + minimum: 0 + description: "Max size of the cache for COUNT_CACHED() aggregation in bytes for each namespace. This cache stores resulting COUNTs and serialized queries for the COUNT_CACHED() aggregations" + query_count_hit_to_cache: + type: integer + default: 2 + minimum: 0 + description: "Default 'hits to cache' for COUNT_CACHED() aggregation of the current namespace. This value determines how many requests are required to put results into the cache. For example, with a value of 2 the first request will be executed without caching, the second request will create a cache entry and put the results into it, and the third request will get the cached results. This value may be automatically increased if the cache is invalidated too quickly"
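Collected in one place, the cache settings documented above form a block like the following. This is a hedged sketch, not part of the patch: the keys and default values are taken from the schema additions above, but the parent object that holds this cache section (e.g. a namespace entry in the #config namespace) is not visible in this hunk, so the nesting is an assumption.

```yaml
# Per-namespace cache settings with the defaults declared above:
cache:
  index_idset_cache_size: 134217728      # bytes, per regular index
  index_idset_hits_to_cache: 2
  ft_index_cache_size: 134217728         # bytes, per fulltext index
  ft_index_hits_to_cache: 2
  joins_preselect_cache_size: 268435456  # bytes, per namespace (JOIN preselect)
  joins_preselect_hit_to_cache: 2
  query_count_cache_size: 134217728      # bytes, per namespace (COUNT_CACHED())
  query_count_hit_to_cache: 2
```

All of the hits-to-cache values share the semantics described above: the number of identical requests after which results start being served from the cache.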
+ ReplicationConfig: type: object diff --git a/cpp_src/server/grpc/reindexerservice.cc b/cpp_src/server/grpc/reindexerservice.cc index a3a66b58c..22085eedd 100644 --- a/cpp_src/server/grpc/reindexerservice.cc +++ b/cpp_src/server/grpc/reindexerservice.cc @@ -980,7 +980,7 @@ Error ReindexerService::execSqlQueryByType(QueryResults& res, const SelectSqlReq return rx->Update(q, res); } case QueryTruncate: { - return rx->TruncateNamespace(q._namespace); + return rx->TruncateNamespace(q.NsName()); } default: return Error(errParams, "unknown query type %d", q.Type()); diff --git a/cpp_src/server/httpserver.cc b/cpp_src/server/httpserver.cc index a6720bfbf..65bd6dc2f 100644 --- a/cpp_src/server/httpserver.cc +++ b/cpp_src/server/httpserver.cc @@ -54,25 +54,16 @@ Error HTTPServer::execSqlQueryByType(std::string_view sqlQuery, reindexer::Query reindexer::Query q; q.FromSQL(sqlQuery); switch (q.Type()) { - case QuerySelect: { - auto db = getDB(ctx, kRoleDataRead); - return db.Select(q, res); - } - case QueryDelete: { - auto db = getDB(ctx, kRoleDataWrite); - return db.Delete(q, res); - } - case QueryUpdate: { - auto db = getDB(ctx, kRoleDataWrite); - return db.Update(q, res); - } - case QueryTruncate: { - auto db = getDB(ctx, kRoleDBAdmin); - return db.TruncateNamespace(q._namespace); - } - default: - throw Error(errParams, "unknown query type %d", q.Type()); + case QuerySelect: + return getDB(ctx).Select(q, res); + case QueryDelete: + return getDB(ctx).Delete(q, res); + case QueryUpdate: + return getDB(ctx).Update(q, res); + case QueryTruncate: + return getDB(ctx).TruncateNamespace(q.NsName()); } + throw Error(errParams, "unknown query type %d", q.Type()); } int HTTPServer::GetSQLQuery(http::Context &ctx) { @@ -85,7 +76,7 @@ int HTTPServer::GetSQLQuery(http::Context &ctx) { unsigned offset = prepareOffset(offsetParam); if (sqlQuery.empty()) { - return status(ctx, http::HttpStatus(http::StatusBadRequest, "Missed `q` parameter")); + return status(ctx, http::HttpStatus(http::StatusBadRequest, "Missing `q` parameter")); } reindexer::QueryResults res; auto ret =
execSqlQueryByType(sqlQuery, res, ctx); @@ -99,11 +90,17 @@ int HTTPServer::GetSQLQuery(http::Context &ctx) { int HTTPServer::GetSQLSuggest(http::Context &ctx) { std::string sqlQuery = urldecode2(ctx.request->params.Get("q")); if (sqlQuery.empty()) { - return jsonStatus(ctx, http::HttpStatus(http::StatusBadRequest, "Missed `q` parameter")); + return jsonStatus(ctx, http::HttpStatus(http::StatusBadRequest, "Missing `q` parameter")); } std::string_view posParam = ctx.request->params.Get("pos"); + if (posParam.empty()) { + return jsonStatus(ctx, http::HttpStatus(http::StatusBadRequest, "Missing `pos` parameter")); + } std::string_view lineParam = ctx.request->params.Get("line"); + if (lineParam.empty()) { + return jsonStatus(ctx, http::HttpStatus(http::StatusBadRequest, "Missing `line` parameter")); + } int pos = stoi(posParam); if (pos < 0) { return jsonStatus(ctx, http::HttpStatus(http::StatusBadRequest, "`pos` parameter should be >= 0")); @@ -122,8 +119,10 @@ int HTTPServer::GetSQLSuggest(http::Context &ctx) { logPrintf(LogTrace, "GetSQLSuggest() incoming data: %s, %d", sqlQuery, bytePos); std::vector suggestions; - auto db = getDB(ctx, kRoleDataRead); - db.GetSqlSuggestions(sqlQuery, bytePos, suggestions); + err = getDB(ctx).GetSqlSuggestions(sqlQuery, bytePos, suggestions); + if (!err.ok()) { + return jsonStatus(ctx, http::HttpStatus(http::StatusBadRequest, err.what())); + } WrSerializer ser(ctx.writer->GetChunk()); reindexer::JsonBuilder builder(ser); @@ -143,13 +142,13 @@ int HTTPServer::PostSQLQuery(http::Context &ctx) { } auto ret = execSqlQueryByType(sqlQuery, res, ctx); if (!ret.ok()) { - return status(ctx, http::HttpStatus(http::StatusInternalServerError, ret.what())); + return status(ctx, http::HttpStatus(ret)); } return queryResults(ctx, res, true); } int HTTPServer::PostQuery(http::Context &ctx) { - auto db = getDB(ctx, kRoleDataRead); + auto db = getDB(ctx); reindexer::QueryResults res; std::string dsl = ctx.body->Read(); @@ -167,7 +166,7 @@ int HTTPServer::PostQuery(http::Context &ctx) { } int HTTPServer::DeleteQuery(http::Context &ctx) { - auto db = getDB(ctx, kRoleDataWrite); + auto db = getDB(ctx); std::string dsl = ctx.body->Read(); reindexer::Query q; @@ -190,7 +189,7 @@ int HTTPServer::DeleteQuery(http::Context &ctx) { } int HTTPServer::UpdateQuery(http::Context &ctx) { - auto db = getDB(ctx, kRoleDataWrite); + auto db = getDB(ctx); std::string dsl = ctx.body->Read(); reindexer::Query q; @@ -303,12 +302,13 @@ int HTTPServer::DeleteDatabase(http::Context &ctx) { } int HTTPServer::GetNamespaces(http::Context &ctx) { - auto db = getDB(ctx, kRoleDataRead); - std::string_view sortOrder = ctx.request->params.Get("sort_order"); std::vector nsDefs; - db.EnumNamespaces(nsDefs, EnumNamespacesOpts().OnlyNames()); + const auto err = getDB(ctx).EnumNamespaces(nsDefs, EnumNamespacesOpts().OnlyNames()); + if (!err.ok()) { + return jsonStatus(ctx, http::HttpStatus(err)); + } int sortDirection = 0; if (sortOrder == "asc") { @@ -342,7 +342,7 @@ int HTTPServer::GetNamespaces(http::Context &ctx) { } int HTTPServer::GetNamespace(http::Context &ctx) { - auto db = getDB(ctx, kRoleDataRead); + auto db = getDB(ctx); std::string nsName = urldecode2(ctx.request->urlParams[1]); @@ -351,7 +351,10 @@ int HTTPServer::GetNamespace(http::Context &ctx) { } std::vector nsDefs; - db.EnumNamespaces(nsDefs, EnumNamespacesOpts().WithFilter(nsName)); + const auto err = db.EnumNamespaces(nsDefs, EnumNamespacesOpts().WithFilter(nsName)); + if (!err.ok()) { + return jsonStatus(ctx, http::HttpStatus(err)); + } if 
(nsDefs.empty()) { return jsonStatus(ctx, http::HttpStatus(http::StatusNotFound, "Namespace is not found")); @@ -363,7 +366,7 @@ int HTTPServer::GetNamespace(http::Context &ctx) { } int HTTPServer::PostNamespace(http::Context &ctx) { - auto db = getDB(ctx, kRoleDBAdmin); + auto db = getDB(ctx); reindexer::NamespaceDef nsdef(""); std::string body = ctx.body->Read(); @@ -381,7 +384,7 @@ int HTTPServer::PostNamespace(http::Context &ctx) { } int HTTPServer::DeleteNamespace(http::Context &ctx) { - auto db = getDB(ctx, kRoleDBAdmin); + auto db = getDB(ctx); std::string nsName = urldecode2(ctx.request->urlParams[1]); if (nsName.empty()) { @@ -399,7 +402,7 @@ int HTTPServer::DeleteNamespace(http::Context &ctx) { } int HTTPServer::TruncateNamespace(http::Context &ctx) { - auto db = getDB(ctx, kRoleDBAdmin); + auto db = getDB(ctx); std::string nsName = urldecode2(ctx.request->urlParams[1]); if (nsName.empty()) { @@ -417,7 +420,7 @@ int HTTPServer::TruncateNamespace(http::Context &ctx) { } int HTTPServer::RenameNamespace(http::Context &ctx) { - auto db = getDB(ctx, kRoleDBAdmin); + auto db = getDB(ctx); std::string srcNsName = urldecode2(ctx.request->urlParams[1]); std::string dstNsName = urldecode2(ctx.request->urlParams[2]); @@ -440,7 +443,7 @@ int HTTPServer::RenameNamespace(http::Context &ctx) { } int HTTPServer::GetItems(http::Context &ctx) { - auto db = getDB(ctx, kRoleDataRead); + auto db = getDB(ctx); std::string nsName = urldecode2(ctx.request->urlParams[1]); @@ -490,7 +493,7 @@ int HTTPServer::GetItems(http::Context &ctx) { reindexer::QueryResults res; auto ret = db.Select(q, res); if (!ret.ok()) { - return status(ctx, http::HttpStatus(http::StatusInternalServerError, ret.what())); + return status(ctx, http::HttpStatus(ret)); } return queryResults(ctx, res); @@ -503,7 +506,7 @@ int HTTPServer::PostItems(http::Context &ctx) { return modifyItems(ctx, ModeInse int HTTPServer::PatchItems(http::Context &ctx) { return modifyItems(ctx, ModeUpsert); } int HTTPServer::GetMetaList(http::Context &ctx) { - auto db = getDB(ctx, kRoleDataRead); + auto db = getDB(ctx); const std::string nsName = urldecode2(ctx.request->urlParams[1]); if (!nsName.length()) { return jsonStatus(ctx, http::HttpStatus(http::StatusBadRequest, "Namespace is not specified")); @@ -578,7 +581,7 @@ int HTTPServer::GetMetaList(http::Context &ctx) { } int HTTPServer::GetMetaByKey(http::Context &ctx) { - auto db = getDB(ctx, kRoleDataRead); + auto db = getDB(ctx); const std::string nsName = urldecode2(ctx.request->urlParams[1]); const std::string key = urldecode2(ctx.request->urlParams[2]); if (!nsName.length()) { @@ -598,7 +601,7 @@ int HTTPServer::GetMetaByKey(http::Context &ctx) { } int HTTPServer::PutMetaByKey(http::Context &ctx) { - auto db = getDB(ctx, kRoleDataWrite); + auto db = getDB(ctx); const std::string nsName = urldecode2(ctx.request->urlParams[1]); if (!nsName.length()) { return jsonStatus(ctx, http::HttpStatus(http::StatusBadRequest, "Namespace is not specified")); @@ -620,7 +623,7 @@ int HTTPServer::PutMetaByKey(http::Context &ctx) { } int HTTPServer::GetIndexes(http::Context &ctx) { - auto db = getDB(ctx, kRoleDataRead); + auto db = getDB(ctx); std::string nsName = urldecode2(ctx.request->urlParams[1]); @@ -629,8 +632,10 @@ int HTTPServer::GetIndexes(http::Context &ctx) { } std::vector nsDefs; - db.EnumNamespaces(nsDefs, EnumNamespacesOpts().WithFilter(nsName)); - + const auto err = db.EnumNamespaces(nsDefs, EnumNamespacesOpts().WithFilter(nsName)); + if (!err.ok()) { + return jsonStatus(ctx, http::HttpStatus(err)); + 
} if (nsDefs.empty()) { return jsonStatus(ctx, http::HttpStatus(http::StatusNotFound, "Namespace is not found")); } @@ -649,7 +654,7 @@ int HTTPServer::GetIndexes(http::Context &ctx) { } int HTTPServer::PostIndex(http::Context &ctx) { - auto db = getDB(ctx, kRoleDBAdmin); + auto db = getDB(ctx); std::string nsName = urldecode2(ctx.request->urlParams[1]); if (!nsName.length()) { @@ -660,10 +665,13 @@ int HTTPServer::PostIndex(http::Context &ctx) { std::string newIdxName = getNameFromJson(json); std::vector nsDefs; - db.EnumNamespaces(nsDefs, EnumNamespacesOpts().WithFilter(nsName)); + auto err = db.EnumNamespaces(nsDefs, EnumNamespacesOpts().WithFilter(nsName)); + if (!err.ok()) { + return jsonStatus(ctx, http::HttpStatus(err)); + } reindexer::IndexDef idxDef; - auto err = idxDef.FromJSON(giftStr(json)); + err = idxDef.FromJSON(giftStr(json)); if (!err.ok()) { return jsonStatus(ctx, http::HttpStatus{err}); } @@ -686,7 +694,7 @@ int HTTPServer::PostIndex(http::Context &ctx) { } int HTTPServer::PutIndex(http::Context &ctx) { - auto db = getDB(ctx, kRoleDBAdmin); + auto db = getDB(ctx); std::string nsName = urldecode2(ctx.request->urlParams[1]); if (!nsName.length()) { @@ -707,7 +715,7 @@ int HTTPServer::PutIndex(http::Context &ctx) { } int HTTPServer::PutSchema(http::Context &ctx) { - auto db = getDB(ctx, kRoleDBAdmin); + auto db = getDB(ctx); std::string nsName = urldecode2(ctx.request->urlParams[1]); if (nsName.empty()) { @@ -723,7 +731,7 @@ int HTTPServer::PutSchema(http::Context &ctx) { } int HTTPServer::GetSchema(http::Context &ctx) { - auto db = getDB(ctx, kRoleDataRead); + auto db = getDB(ctx); std::string nsName = urldecode2(ctx.request->urlParams[1]); if (nsName.empty()) { @@ -740,7 +748,7 @@ int HTTPServer::GetSchema(http::Context &ctx) { } int HTTPServer::GetProtobufSchema(http::Context &ctx) { - Reindexer db = getDB(ctx, kRoleDataRead); + Reindexer db = getDB(ctx); std::vector nses; for (auto &p : ctx.request->params) { @@ -757,7 +765,7 @@ int HTTPServer::GetProtobufSchema(http::Context &ctx) { } int HTTPServer::DeleteIndex(http::Context &ctx) { - auto db = getDB(ctx, kRoleDBAdmin); + auto db = getDB(ctx); std::string nsName = urldecode2(ctx.request->urlParams[1]); IndexDef idef(urldecode2(ctx.request->urlParams[2])); @@ -789,6 +797,9 @@ int HTTPServer::Check(http::Context &ctx) { builder.Put("start_time", startTs); builder.Put("uptime", uptime); builder.Put("rpc_address", serverConfig_.RPCAddr); + if (!serverConfig_.RPCUnixAddr.empty()) { + builder.Put("urpc_address", serverConfig_.RPCUnixAddr); + } builder.Put("http_address", serverConfig_.HTTPAddr); builder.Put("storage_path", serverConfig_.StoragePath); builder.Put("rpc_log", serverConfig_.RpcLog); @@ -977,17 +988,17 @@ bool HTTPServer::Start(const std::string &addr, ev::dynamic_loop &loop) { } if (serverConfig_.HttpThreadingMode == ServerConfig::kDedicatedThreading) { - listener_.reset(new ForkedListener(loop, http::ServerConnection::NewFactory(router_, serverConfig_.MaxHttpReqSize))); + listener_ = std::make_unique(loop, http::ServerConnection::NewFactory(router_, serverConfig_.MaxHttpReqSize)); } else { - listener_.reset( - new Listener(loop, http::ServerConnection::NewFactory(router_, serverConfig_.MaxHttpReqSize))); + listener_ = std::make_unique>( + loop, http::ServerConnection::NewFactory(router_, serverConfig_.MaxHttpReqSize)); } deadlineChecker_.set(this); deadlineChecker_.set(loop); deadlineChecker_.start(std::chrono::duration_cast(kTxDeadlineCheckPeriod).count(), 
std::chrono::duration_cast(kTxDeadlineCheckPeriod).count()); - return listener_->Bind(addr); + return listener_->Bind(addr, socket_domain::tcp); } Error HTTPServer::modifyItem(Reindexer &db, std::string &nsName, Item &item, ItemModifyMode mode) { @@ -1029,7 +1040,7 @@ Error HTTPServer::modifyItem(Reindexer &db, std::string &nsName, Item &item, Que } int HTTPServer::modifyItemsJSON(http::Context &ctx, std::string &nsName, const std::vector &precepts, ItemModifyMode mode) { - auto db = getDB(ctx, kRoleDataWrite); + auto db = getDB(ctx); std::string itemJson = ctx.body->Read(); int cnt = 0; std::vector updatedItems; @@ -1066,7 +1077,6 @@ int HTTPServer::modifyItemsJSON(http::Context &ctx, std::string &nsName, const s if (!precepts.empty()) updatedItems.emplace_back(item.GetJSON()); } } - db.Commit(nsName); } WrSerializer ser(ctx.writer->GetChunk()); @@ -1087,7 +1097,7 @@ int HTTPServer::modifyItemsMsgPack(http::Context &ctx, std::string &nsName, cons QueryResults qr; int totalItems = 0; - auto db = getDB(ctx, kRoleDataWrite); + auto db = getDB(ctx); std::string sbuffer = ctx.body->Read(); size_t length = sbuffer.size(); @@ -1120,7 +1130,10 @@ int HTTPServer::modifyItemsMsgPack(http::Context &ctx, std::string &nsName, cons if (!precepts.empty()) { auto itemsArray = msgpackBuilder.Array(kParamItems, qr.Count()); for (size_t i = 0; i < qr.Count(); ++i) { - qr[i].GetMsgPack(wrSer, false); + const auto err = qr[i].GetMsgPack(wrSer, false); + if (!err.ok()) { + return msgpackStatus(ctx, http::HttpStatus(err)); + } } itemsArray.End(); } @@ -1144,7 +1157,7 @@ int HTTPServer::modifyItemsProtobuf(http::Context &ctx, std::string &nsName, con return ctx.Protobuf(reindexer::net::http::HttpStatus::errCodeToHttpStatus(err.code()), wrSer.DetachChunk()); }; - auto db = getDB(ctx, kRoleDataWrite); + auto db = getDB(ctx); Item item = db.NewItem(nsName); if (!item.Status().ok()) return sendResponse(0, item.Status()); @@ -1246,7 +1259,7 @@ int HTTPServer::modifyItems(http::Context &ctx, ItemModifyMode mode) { int HTTPServer::modifyItemsTx(http::Context &ctx, ItemModifyMode mode) { std::string dbName; - auto db = getDB(ctx, kRoleDataWrite, &dbName); + auto db = getDB(ctx, &dbName); std::string txId = urldecode2(ctx.request->urlParams[1]); if (txId.empty()) { return status(ctx, http::HttpStatus(http::StatusBadRequest, "Tx ID is not specified")); @@ -1271,10 +1284,14 @@ int HTTPServer::queryResultsJSON(http::Context &ctx, const reindexer::QueryResul auto iarray = builder.Array(kParamItems); const bool isWALQuery = res.IsWALQuery(); + std::optional db; for (size_t i = offset; i < res.Count() && i < offset + limit; ++i) { if (!isWALQuery) { iarray.Raw(nullptr, ""); - res[i].GetJSON(wrSer, false); + const auto err = res[i].GetJSON(wrSer, false); + if (!err.ok()) { + return jsonStatus(ctx, http::HttpStatus(err)); + } } else { auto obj = iarray.Object(nullptr); { @@ -1284,11 +1301,17 @@ int HTTPServer::queryResultsJSON(http::Context &ctx, const reindexer::QueryResul } if (!res[i].IsRaw()) { iarray.Raw(kWALParamItem, ""); - res[i].GetJSON(wrSer, false); + const auto err = res[i].GetJSON(wrSer, false); + if (!err.ok()) { + return jsonStatus(ctx, http::HttpStatus(err)); + } } else { reindexer::WALRecord rec(res[i].GetRaw()); - rec.GetJSON(obj, [this, &res, &ctx](std::string_view cjson) { - auto item = getDB(ctx, kRoleDataRead).NewItem(res.GetNamespaces()[0]); + rec.GetJSON(obj, [this, &ctx, &res, &db](std::string_view cjson) { + if (!db.has_value()) { + db.emplace(getDB(ctx)); + } + auto item = 
db->NewItem(res.GetNamespaces()[0]); auto err = item.FromCJSON(cjson); if (!err.ok()) { throw Error(err.code(), "Unable to parse CJSON for WAL item: %s", err.what()); @@ -1395,7 +1418,10 @@ int HTTPServer::queryResultsMsgPack(http::Context &ctx, const reindexer::QueryRe auto itemsArray = msgpackBuilder.Array(kParamItems, std::min(size_t(limit), size_t(res.Count() - offset))); for (size_t i = offset; i < res.Count() && i < offset + limit; i++) { - res[i].GetMsgPack(wrSer, false); + const auto err = res[i].GetMsgPack(wrSer, false); + if (!err.ok()) { + return msgpackStatus(ctx, http::HttpStatus(err)); + } } itemsArray.End(); @@ -1422,7 +1448,10 @@ int HTTPServer::queryResultsProtobuf(http::Context &ctx, const reindexer::QueryR auto item = protobufBuilder.Object(itemsField); auto it = res[i]; auto i1 = item.Object(res.getNsNumber(it.GetItemRef().Nsid()) + 1); - it.GetProtobuf(wrSer, false); + const auto err = it.GetProtobuf(wrSer, false); + if (!err.ok()) { + return ctx.Protobuf(err.code(), wrSer.DetachChunk()); + } i1.End(); item.End(); } @@ -1522,7 +1551,7 @@ void HTTPServer::queryResultParams(Builder &builder, const reindexer::QueryResul int HTTPServer::queryResults(http::Context &ctx, reindexer::QueryResults &res, bool isQueryResults, unsigned limit, unsigned offset) { std::string_view widthParam = ctx.request->params.Get("width"sv); - int width = stoi(widthParam); + int width = widthParam.empty() ? 0 : stoi(widthParam); std::string_view format = ctx.request->params.Get("format"); std::string_view withColumnsParam = ctx.request->params.Get("with_columns"); @@ -1619,7 +1648,8 @@ int HTTPServer::modifyQueryTxImpl(http::Context &ctx, const std::string &dbName, return status(ctx); } -Reindexer HTTPServer::getDB(http::Context &ctx, UserRole role, std::string *dbNameOut) { +template +Reindexer HTTPServer::getDB(http::Context &ctx, std::string *dbNameOut) { (void)ctx; Reindexer *db = nullptr; @@ -1647,8 +1677,27 @@ Reindexer HTTPServer::getDB(http::Context &ctx, UserRole role, std::string *dbNa throw http::HttpStatus(status); } assertrx(db); - return db->NeedTraceActivity() ? db->WithActivityTracer(ctx.request->clientAddr, std::string(ctx.request->headers.Get("User-Agent"))) - : *db; + std::string_view timeoutHeader = ctx.request->headers.Get("Request-Timeout"); + std::optional timeoutSec; + if (!timeoutHeader.empty()) { + timeoutSec = try_stoi(timeoutHeader); + if rx_unlikely (!timeoutSec.has_value()) { + logger_.warn("Unable to get integer value from 'Request-Timeout'-header('%s'). Using default value", timeoutHeader); + } + } + std::chrono::seconds timeout; + + if constexpr (role == kUnauthorized || role == kRoleNone) { + throw Error(errLogic, "Unexpected user's role"); + } else if constexpr (role == kRoleDataRead) { + timeout = timeoutSec.has_value() ? std::chrono::seconds(timeoutSec.value()) : serverConfig_.HttpReadTimeout; + } else if constexpr (role >= kRoleDataWrite) { + timeout = timeoutSec.has_value() ? std::chrono::seconds(timeoutSec.value()) : serverConfig_.HttpWriteTimeout; + } + return db->NeedTraceActivity() + ? 
db->WithContextParams(timeout, ctx.request->clientAddr, std::string(ctx.request->headers.Get("User-Agent")), + InternalRdxContext::kNoConnectionId) + : db->WithTimeout(timeout); } std::string HTTPServer::getNameFromJson(std::string_view json) { @@ -1662,7 +1711,7 @@ std::string HTTPServer::getNameFromJson(std::string_view json) { } std::shared_ptr HTTPServer::getTx(const std::string &dbName, std::string_view txId) { - std::lock_guard lck(txMtx_); + std::lock_guard lck(txMtx_); auto found = txMap_.find(txId); if (found == txMap_.end()) { throw http::HttpStatus(Error(errNotFound, "Invalid tx id"sv)); @@ -1681,7 +1730,8 @@ std::string HTTPServer::addTx(std::string dbName, Transaction &&tx) { txInfo.tx = std::make_shared(std::move(tx)); txInfo.dbName = std::move(dbName); txInfo.txDeadline = TxDeadlineClock::now() + serverConfig_.TxIdleTimeout; - std::lock_guard lck(txMtx_); + + std::lock_guard lck(txMtx_); auto result = txMap_.try_emplace(txId, std::move(txInfo)); if (!result.second) { throw Error(errLogic, "Tx id conflict"); @@ -1690,7 +1740,7 @@ std::string HTTPServer::addTx(std::string dbName, Transaction &&tx) { } void HTTPServer::removeTx(const std::string &dbName, std::string_view txId) { - std::lock_guard lck(txMtx_); + std::lock_guard lck(txMtx_); auto found = txMap_.find(txId); if (found == txMap_.end() || !iequals(found.value().dbName, dbName)) { throw Error(errNotFound, "Invalid tx id"); @@ -1699,8 +1749,9 @@ void HTTPServer::removeTx(const std::string &dbName, std::string_view txId) { } void HTTPServer::removeExpiredTx() { - auto now = TxDeadlineClock::now(); - std::lock_guard lck(txMtx_); + const auto now = TxDeadlineClock::now(); + + std::lock_guard lck(txMtx_); for (auto it = txMap_.begin(); it != txMap_.end();) { if (it->second.txDeadline <= now) { auto ctx = MakeSystemAuthContext(); @@ -1708,9 +1759,9 @@ void HTTPServer::removeExpiredTx() { if (status.ok()) { reindexer::Reindexer *db = nullptr; status = ctx.GetDB(kRoleSystem, &db); - if (db) { + if (db && status.ok()) { logger_.info("Rollback tx {} on idle deadline", it->first); - db->RollBackTransaction(*it->second.tx); + status = db->RollBackTransaction(*it->second.tx); } } it = txMap_.erase(it); @@ -1761,8 +1812,7 @@ int HTTPServer::BeginTx(http::Context &ctx) { } std::string dbName; - auto db = getDB(ctx, kRoleDataWrite, &dbName); - auto tx = db.NewTransaction(nsName); + auto tx = getDB(ctx, &dbName).NewTransaction(nsName); if (!tx.Status().ok()) { return status(ctx, http::HttpStatus(tx.Status())); } @@ -1789,12 +1839,12 @@ int HTTPServer::CommitTx(http::Context &ctx) { } std::string dbName; - auto db = getDB(ctx, kRoleDataWrite, &dbName); + auto db = getDB(ctx, &dbName); auto tx = getTx(dbName, txId); QueryResults qr; auto ret = db.CommitTransaction(*tx, qr); if (!ret.ok()) { - return status(ctx, http::HttpStatus(http::StatusInternalServerError, ret.what())); + return status(ctx, http::HttpStatus(ret)); } removeTx(dbName, txId); return queryResults(ctx, qr); @@ -1807,7 +1857,7 @@ int HTTPServer::RollbackTx(http::Context &ctx) { } std::string dbName; - auto db = getDB(ctx, kRoleDataWrite, &dbName); + auto db = getDB(ctx, &dbName); auto tx = getTx(dbName, txId); QueryResults qr; auto ret = db.RollBackTransaction(*tx); @@ -1828,7 +1878,7 @@ int HTTPServer::DeleteItemsTx(http::Context &ctx) { return modifyItemsTx(ctx, Mo int HTTPServer::GetSQLQueryTx(http::Context &ctx) { std::string dbName; - auto db = getDB(ctx, kRoleDataRead, &dbName); + auto db = getDB(ctx, &dbName); std::string txId = 
urldecode2(ctx.request->urlParams[1]); if (txId.empty()) { return status(ctx, http::HttpStatus(http::StatusBadRequest, "Tx ID is not specified")); @@ -1836,7 +1886,7 @@ int HTTPServer::GetSQLQueryTx(http::Context &ctx) { reindexer::QueryResults res; std::string sqlQuery = urldecode2(ctx.request->params.Get("q")); if (sqlQuery.empty()) { - return status(ctx, http::HttpStatus(http::StatusBadRequest, "Missed `q` parameter")); + return status(ctx, http::HttpStatus(http::StatusBadRequest, "Missing `q` parameter")); } try { @@ -1849,9 +1899,8 @@ int HTTPServer::GetSQLQueryTx(http::Context &ctx) { case QuerySelect: case QueryTruncate: return status(ctx, http::HttpStatus(http::StatusInternalServerError, "Transactions support update/delete queries only")); - default: - abort(); } + return status(ctx, http::HttpStatus(Error(errLogic, "Unexpected query type: %d", q.type_))); } catch (const Error &e) { return status(ctx, http::HttpStatus(e)); } @@ -1859,7 +1908,7 @@ int HTTPServer::GetSQLQueryTx(http::Context &ctx) { int HTTPServer::DeleteQueryTx(http::Context &ctx) { std::string dbName; - auto db = getDB(ctx, kRoleDataWrite, &dbName); + auto db = getDB(ctx, &dbName); std::string dsl = ctx.body->Read(); reindexer::Query q; @@ -1920,12 +1969,15 @@ void HTTPServer::Logger(http::Context &ctx) { void HTTPServer::OnResponse(http::Context &ctx) { if (statsWatcher_) { - std::string dbName = ""; + static const std::string kUnknownDBName = ""; + std::string dbName; + const std::string *dbNamePtr = &kUnknownDBName; if (nullptr != ctx.request && !ctx.request->urlParams.empty() && 0 == ctx.request->path.find("/api/v1/db/"sv)) { dbName = urldecode2(ctx.request->urlParams[0]); + dbNamePtr = &dbName; } - statsWatcher_->OnInputTraffic(dbName, statsSourceName(), ctx.stat.sizeStat.reqSizeBytes); - statsWatcher_->OnOutputTraffic(dbName, statsSourceName(), ctx.stat.sizeStat.respSizeBytes); + statsWatcher_->OnInputTraffic(*dbNamePtr, statsSourceName(), std::string_view(), ctx.stat.sizeStat.reqSizeBytes); + statsWatcher_->OnOutputTraffic(*dbNamePtr, statsSourceName(), std::string_view(), ctx.stat.sizeStat.respSizeBytes); } } diff --git a/cpp_src/server/httpserver.h b/cpp_src/server/httpserver.h index 647c8c7c9..15420a135 100644 --- a/cpp_src/server/httpserver.h +++ b/cpp_src/server/httpserver.h @@ -18,7 +18,7 @@ struct IStatsWatcher; using namespace reindexer::net; -struct HTTPClientData : public http::ClientData { +struct HTTPClientData final : public http::ClientData { AuthContext auth; }; @@ -108,7 +108,8 @@ class HTTPServer { unsigned prepareOffset(std::string_view offsetParam, int offsetDefault = kDefaultOffset); int modifyQueryTxImpl(http::Context &ctx, const std::string &dbName, std::string_view txId, Query &q); - Reindexer getDB(http::Context &ctx, UserRole role, std::string *dbNameOut = nullptr); + template + Reindexer getDB(http::Context &ctx, std::string *dbNameOut = nullptr); std::string getNameFromJson(std::string_view json); constexpr static std::string_view statsSourceName() { return std::string_view{"http"}; } diff --git a/cpp_src/server/rpcserver.cc b/cpp_src/server/rpcserver.cc index c0731aa5d..c729cb065 100644 --- a/cpp_src/server/rpcserver.cc +++ b/cpp_src/server/rpcserver.cc @@ -6,7 +6,6 @@ #include "core/transactionimpl.h" #include "net/cproto/cproto.h" #include "net/cproto/serverconnection.h" -#include "net/listener.h" #include "reindexer_version.h" #include "vendor/msgpack/msgpack.h" @@ -14,7 +13,7 @@ namespace reindexer_server { using namespace std::string_view_literals; const reindexer::SemVersion 
kMinUnknownReplSupportRxVersion("2.6.0"); -const size_t kMaxTxCount = 1024; +constexpr size_t kMaxTxCount = 1024; RPCServer::RPCServer(DBManager &dbMgr, LoggerWrapper &logger, IClientsStats *clientsStats, const ServerConfig &scfg, IStatsWatcher *statsCollector) @@ -47,7 +46,8 @@ Error RPCServer::Login(cproto::Context &ctx, p_string login, p_string password, return Error(errParams, "Already logged in"); } - std::unique_ptr clientData(new RPCClientData); + auto clientData = std::make_unique(); + auto &clientDataRef = *clientData; clientData->connID = connCounter.fetch_add(1, std::memory_order_relaxed); clientData->pusher.SetWriter(ctx.writer); @@ -84,6 +84,7 @@ Error RPCServer::Login(cproto::Context &ctx, p_string login, p_string password, reindexer::ClientConnectionStat conn; conn.connectionStat = ctx.writer->GetConnectionStat(); conn.ip = std::string(ctx.clientAddr); + conn.protocol = protocolName_; conn.userName = clientData->auth.Login(); conn.dbName = clientData->auth.DBName(); conn.userRights = std::string(UserRoleName(clientData->auth.UserRights())); @@ -95,23 +96,23 @@ Error RPCServer::Login(cproto::Context &ctx, p_string login, p_string password, } ctx.SetClientData(std::move(clientData)); - if (statsWatcher_) { - statsWatcher_->OnClientConnected(dbName, statsSourceName()); - } - int64_t startTs = std::chrono::duration_cast(startTs_.time_since_epoch()).count(); - static std::string_view version = REINDEX_VERSION; status = db.length() ? OpenDatabase(ctx, db, createDBIfMissing) : errOK; if (status.ok()) { + const int64_t startTs = std::chrono::duration_cast(startTs_.time_since_epoch()).count(); + constexpr std::string_view version = REINDEX_VERSION; ctx.Return({cproto::Arg(p_string(&version)), cproto::Arg(startTs)}, status); } + if (statsWatcher_) { + statsWatcher_->OnClientConnected(clientDataRef.auth.DBName(), statsSourceName(), protocolName_); + } return status; } -static RPCClientData *getClientDataUnsafe(cproto::Context &ctx) { return dynamic_cast(ctx.GetClientData()); } +static RPCClientData *getClientDataUnsafe(cproto::Context &ctx) noexcept { return dynamic_cast(ctx.GetClientData()); } -static RPCClientData *getClientDataSafe(cproto::Context &ctx) { +static RPCClientData *getClientDataSafe(cproto::Context &ctx) noexcept { auto ret = dynamic_cast(ctx.GetClientData()); if rx_unlikely (!ret) std::abort(); // It has to be set by the middleware return ret; @@ -130,9 +131,8 @@ Error RPCServer::OpenDatabase(cproto::Context &ctx, p_string db, std::optionalauth.ResetDB(); - return errOK; + getClientDataSafe(ctx)->auth.ResetDB(); + return {}; } Error RPCServer::DropDatabase(cproto::Context &ctx) { auto clientData = getClientDataSafe(ctx); @@ -149,14 +149,14 @@ Error RPCServer::CheckAuth(cproto::Context &ctx) { auto clientData = dynamic_cast(ptr); if (ctx.call->cmd == cproto::kCmdLogin || ctx.call->cmd == cproto::kCmdPing) { - return errOK; + return {}; } if (!clientData) { return Error(errForbidden, "You should login"); } - return errOK; + return {}; } void RPCServer::OnClose(cproto::Context &ctx, const Error &err) { @@ -166,7 +166,7 @@ void RPCServer::OnClose(cproto::Context &ctx, const Error &err) { auto clientData = getClientDataUnsafe(ctx); if (clientData) { if (statsWatcher_) { - statsWatcher_->OnClientDisconnected(clientData->auth.DBName(), statsSourceName()); + statsWatcher_->OnClientDisconnected(clientData->auth.DBName(), statsSourceName(), protocolName_); } if (clientsStats_) { clientsStats_->DeleteConnection(clientData->connID); @@ -186,11 +186,12 @@ void 
RPCServer::OnClose(cproto::Context &ctx, const Error &err) { void RPCServer::OnResponse(cproto::Context &ctx) { if (statsWatcher_) { auto clientData = getClientDataUnsafe(ctx); - auto dbName = (clientData != nullptr) ? clientData->auth.DBName() : ""; - statsWatcher_->OnOutputTraffic(dbName, statsSourceName(), ctx.stat.sizeStat.respSizeBytes); + static const std::string kUnknownDbName(""); + const std::string &dbName = (clientData != nullptr) ? clientData->auth.DBName() : kUnknownDbName; + statsWatcher_->OnOutputTraffic(dbName, statsSourceName(), protocolName_, ctx.stat.sizeStat.respSizeBytes); if (ctx.stat.sizeStat.respSizeBytes) { // Don't update stats on responses like "updates push" - statsWatcher_->OnInputTraffic(dbName, statsSourceName(), ctx.stat.sizeStat.reqSizeBytes); + statsWatcher_->OnInputTraffic(dbName, statsSourceName(), protocolName_, ctx.stat.sizeStat.reqSizeBytes); } } } @@ -207,10 +208,9 @@ Error RPCServer::execSqlQueryByType(std::string_view sqlQuery, QueryResults &res case QueryUpdate: return getDB(ctx, kRoleDataWrite).Update(q, res); case QueryTruncate: - return getDB(ctx, kRoleDBAdmin).TruncateNamespace(q._namespace); - default: - return Error(errParams, "unknown query type %d", q.Type()); + return getDB(ctx, kRoleDBAdmin).TruncateNamespace(q.NsName()); } + return Error(errParams, "unknown query type %d", q.Type()); } catch (Error &e) { return e; } @@ -218,7 +218,9 @@ Error RPCServer::execSqlQueryByType(std::string_view sqlQuery, QueryResults &res void RPCServer::Logger(cproto::Context &ctx, const Error &err, const cproto::Args &ret) { const auto clientData = getClientDataUnsafe(ctx); - WrSerializer ser; + uint8_t buf[0x500]; + WrSerializer ser(buf); + ser << "p='" << protocolName_ << '\''; if (clientData) { ser << "c='"sv << clientData->connID << "' db='"sv << clientData->auth.Login() << '@' << clientData->auth.DBName() << "' "sv; } else { @@ -426,6 +428,7 @@ Error RPCServer::CommitTx(cproto::Context &ctx, int64_t txId, std::optional Transaction &tr = getTx(ctx, txId); QueryResults qres; + auto err = db.CommitTransaction(tr, qres); if (err.ok()) { int32_t ptVers = -1; @@ -954,7 +957,7 @@ Error RPCServer::SubscribeUpdates(cproto::Context &ctx, int flag, std::optional< return ret; } -bool RPCServer::Start(const std::string &addr, ev::dynamic_loop &loop) { +bool RPCServer::Start(const std::string &addr, ev::dynamic_loop &loop, RPCSocketT sockDomain, std::string_view threadingMode) { dispatcher_.Register(cproto::kCmdPing, this, &RPCServer::Ping); dispatcher_.Register(cproto::kCmdLogin, this, &RPCServer::Login); dispatcher_.Register(cproto::kCmdOpenDatabase, this, &RPCServer::OpenDatabase); @@ -1004,11 +1007,12 @@ bool RPCServer::Start(const std::string &addr, ev::dynamic_loop &loop) { dispatcher_.Logger(this, &RPCServer::Logger); } + protocolName_ = (sockDomain == RPCSocketT::TCP) ? 
kTcpProtocolName : kUnixProtocolName; auto factory = cproto::ServerConnection::NewFactory(dispatcher_, serverConfig_.EnableConnectionsStats, serverConfig_.MaxUpdatesSize); - if (serverConfig_.RPCThreadingMode == ServerConfig::kDedicatedThreading) { - listener_.reset(new ForkedListener(loop, std::move(factory))); + if (threadingMode == ServerConfig::kDedicatedThreading) { + listener_ = std::make_unique(loop, std::move(factory)); } else { - listener_.reset(new Listener(loop, std::move(factory))); + listener_ = std::make_unique>(loop, std::move(factory)); } assertrx(!qrWatcherThread_.joinable()); @@ -1026,7 +1030,7 @@ bool RPCServer::Start(const std::string &addr, ev::dynamic_loop &loop) { qrWatcher_.Stop(); }); - return listener_->Bind(addr); + return listener_->Bind(addr, (sockDomain == RPCSocketT::TCP) ? socket_domain::tcp : socket_domain::unx); } RPCClientData::~RPCClientData() { diff --git a/cpp_src/server/rpcserver.h b/cpp_src/server/rpcserver.h index 235ecc8e2..bc75b3f3f 100644 --- a/cpp_src/server/rpcserver.h +++ b/cpp_src/server/rpcserver.h @@ -23,7 +23,9 @@ namespace reindexer_server { using namespace reindexer::net; using namespace reindexer; -struct RPCClientData : public cproto::ClientData { +enum class RPCSocketT : bool { Unx, TCP }; + +struct RPCClientData final : public cproto::ClientData { ~RPCClientData(); h_vector results; std::vector txs; @@ -42,7 +44,7 @@ class RPCServer { IStatsWatcher *statsCollector = nullptr); ~RPCServer(); - bool Start(const std::string &addr, ev::dynamic_loop &loop); + bool Start(const std::string &addr, ev::dynamic_loop &loop, RPCSocketT sockDomain, std::string_view threadingMode); void Stop() { terminate_ = true; if (qrWatcherThread_.joinable()) { @@ -121,7 +123,7 @@ class RPCServer { void clearTx(cproto::Context &ctx, uint64_t txId); Reindexer getDB(cproto::Context &ctx, UserRole role); - constexpr static std::string_view statsSourceName() { return std::string_view{"rpc"}; } + constexpr static std::string_view statsSourceName() noexcept { return std::string_view{"rpc"}; } DBManager &dbMgr_; cproto::Dispatcher dispatcher_; @@ -138,6 +140,7 @@ class RPCServer { RPCQrWatcher qrWatcher_; std::atomic terminate_ = {false}; ev::async qrWatcherTerminateAsync_; + std::string_view protocolName_; }; } // namespace reindexer_server diff --git a/cpp_src/server/serverimpl.cc b/cpp_src/server/serverimpl.cc index cc7ed0255..83955b45e 100644 --- a/cpp_src/server/serverimpl.cc +++ b/cpp_src/server/serverimpl.cc @@ -286,10 +286,9 @@ int ServerImpl::run() { initCoreLogger(); logger_.info("Initializing databases..."); - std::unique_ptr clientsStats; - if (config_.EnableConnectionsStats) clientsStats.reset(new ClientsStats()); + const auto clientsStats = config_.EnableConnectionsStats ? 
std::make_unique() : std::unique_ptr(); try { - dbMgr_.reset(new DBManager(config_.StoragePath, !config_.EnableSecurity, clientsStats.get())); + dbMgr_ = std::make_unique(config_.StoragePath, !config_.EnableSecurity, clientsStats.get()); auto status = dbMgr_->Init(config_.StorageEngine, config_.StartWithErrors, config_.Autorepair); if (!status.ok()) { @@ -298,8 +297,13 @@ int ServerImpl::run() { } storageLoaded_ = true; - logger_.info("Starting reindexer_server ({0}) on {1} HTTP, {2} RPC, with db '{3}'", REINDEX_VERSION, config_.HTTPAddr, - config_.RPCAddr, config_.StoragePath); + if (config_.RPCUnixAddr.empty()) { + logger_.info("Starting reindexer_server ({0}) on {1} HTTP, {2} RPC(TCP), with db '{3}'", REINDEX_VERSION, config_.HTTPAddr, + config_.RPCAddr, config_.StoragePath); + } else { + logger_.info("Starting reindexer_server ({0}) on {1} HTTP, {2} RPC(TCP), {3} RPC(Unix), with db '{4}'", REINDEX_VERSION, + config_.HTTPAddr, config_.RPCAddr, config_.RPCUnixAddr, config_.StoragePath); + } std::unique_ptr prometheus; std::unique_ptr statsCollector; @@ -316,10 +320,22 @@ int ServerImpl::run() { } LoggerWrapper rpcLogger("rpc"); - std::unique_ptr rpcServer = - std::make_unique(*dbMgr_, rpcLogger, clientsStats.get(), config_, statsCollector.get()); - if (!rpcServer->Start(config_.RPCAddr, loop_)) { - logger_.error("Can't listen RPC on '{0}'", config_.RPCAddr); + auto rpcServerTCP = std::make_unique(*dbMgr_, rpcLogger, clientsStats.get(), config_, statsCollector.get()); + std::unique_ptr rpcServerUnix; + if (!config_.RPCUnixAddr.empty()) { +#ifdef _WIN32 + logger_.warn("Unable to startup RPC(Unix) on '{0}' (unix domain socket are not supported on Windows platforms)", + config_.RPCUnixAddr); +#else // _WIN32 + rpcServerUnix = std::make_unique(*dbMgr_, rpcLogger, clientsStats.get(), config_, statsCollector.get()); + if (!rpcServerUnix->Start(config_.RPCUnixAddr, loop_, RPCSocketT::Unx, config_.RPCUnixThreadingMode)) { + logger_.error("Can't listen RPC(Unix) on '{0}'", config_.RPCUnixAddr); + return EXIT_FAILURE; + } +#endif // _WIN32 + } + if (!rpcServerTCP->Start(config_.RPCAddr, loop_, RPCSocketT::TCP, config_.RPCThreadingMode)) { + logger_.error("Can't listen RPC(TCP) on '{0}'", config_.RPCAddr); return EXIT_FAILURE; } #if defined(WITH_GRPC) @@ -391,8 +407,12 @@ int ServerImpl::run() { if (statsCollector) statsCollector->Stop(); logger_.info("Stats collector shutdown completed."); - rpcServer->Stop(); - logger_.info("RPC Server shutdown completed."); + if (rpcServerUnix) { + rpcServerUnix->Stop(); + logger_.info("RPC Server(Unix) shutdown completed."); + } + rpcServerTCP->Stop(); + logger_.info("RPC Server(TCP) shutdown completed."); httpServer.Stop(); logger_.info("HTTP Server shutdown completed."); #if defined(WITH_GRPC) diff --git a/cpp_src/server/statscollect/istatswatcher.h b/cpp_src/server/statscollect/istatswatcher.h index ac9f82bb4..1abe4ee03 100644 --- a/cpp_src/server/statscollect/istatswatcher.h +++ b/cpp_src/server/statscollect/istatswatcher.h @@ -31,10 +31,10 @@ class StatsWatcherSuspend { struct IStatsWatcher { [[nodiscard]] virtual StatsWatcherSuspend SuspendStatsThread() = 0; - virtual void OnInputTraffic(const std::string& db, std::string_view source, size_t bytes) noexcept = 0; - virtual void OnOutputTraffic(const std::string& db, std::string_view source, size_t bytes) noexcept = 0; - virtual void OnClientConnected(const std::string& db, std::string_view source) noexcept = 0; - virtual void OnClientDisconnected(const std::string& db, std::string_view source) noexcept = 0; 
+ virtual void OnInputTraffic(const std::string& db, std::string_view source, std::string_view protocol, size_t bytes) noexcept = 0; + virtual void OnOutputTraffic(const std::string& db, std::string_view source, std::string_view protocol, size_t bytes) noexcept = 0; + virtual void OnClientConnected(const std::string& db, std::string_view source, std::string_view protocol) noexcept = 0; + virtual void OnClientDisconnected(const std::string& db, std::string_view source, std::string_view protocol) noexcept = 0; virtual ~IStatsWatcher() noexcept = default; }; diff --git a/cpp_src/server/statscollect/prometheus.cc b/cpp_src/server/statscollect/prometheus.cc index 2e7040118..7b1b9b693 100644 --- a/cpp_src/server/statscollect/prometheus.cc +++ b/cpp_src/server/statscollect/prometheus.cc @@ -32,13 +32,17 @@ void Prometheus::Attach(http::Router& router) { void Prometheus::NextEpoch() { registry_.RemoveOutdated(currentEpoch_++ - 1); } +void Prometheus::setMetricValue(PFamily* metricFamily, double value, int64_t epoch) { + if (metricFamily) { + metricFamily->Add(std::map{}, epoch).Set(value); + } +} + void Prometheus::setMetricValue(PFamily* metricFamily, double value, int64_t epoch, const std::string& db, const std::string& ns, std::string_view queryType) { if (metricFamily) { std::map labels; - if (!db.empty()) { - labels.emplace("db", db); - } + labels.emplace("db", db); if (!ns.empty()) { labels.emplace("ns", ns); } @@ -49,15 +53,17 @@ void Prometheus::setMetricValue(PFamily* metricFamily, doubl } } -void Prometheus::setMetricValue(PFamily* metricFamily, double value, int64_t epoch, const std::string& db, std::string_view type) { +void Prometheus::setNetMetricValue(PFamily* metricFamily, double value, int64_t epoch, const std::string& db, std::string_view type, + std::string_view protocol) { if (metricFamily) { std::map labels; - if (!db.empty()) { - labels.emplace("db", std::string(db)); - } + labels.emplace("db", db); if (!type.empty()) { labels.emplace("type", std::string(type)); } + if (!protocol.empty()) { + labels.emplace("protocol_domain", std::string(protocol)); + } metricFamily->Add(std::move(labels), epoch).Set(value); } } diff --git a/cpp_src/server/statscollect/prometheus.h b/cpp_src/server/statscollect/prometheus.h index 9fb0fa903..8e5862676 100644 --- a/cpp_src/server/statscollect/prometheus.h +++ b/cpp_src/server/statscollect/prometheus.h @@ -37,12 +37,14 @@ class Prometheus { setMetricValue(itemsCount_, count, currentEpoch_, db, ns); } void RegisterAllocatedMemory(size_t memoryConsumationBytes) { setMetricValue(memory_, memoryConsumationBytes, prometheus::kNoEpoch); } - void RegisterRPCClients(const std::string &db, size_t count) { setMetricValue(rpcClients_, count, currentEpoch_, db); } - void RegisterInputTraffic(const std::string &db, std::string_view type, size_t bytes) { - setMetricValue(inputTraffic_, bytes, prometheus::kNoEpoch, db, type); + void RegisterRPCClients(const std::string &db, std::string_view protocol, size_t count) { + setNetMetricValue(rpcClients_, count, currentEpoch_, db, std::string_view(), protocol); } - void RegisterOutputTraffic(const std::string &db, std::string_view type, size_t bytes) { - setMetricValue(outputTraffic_, bytes, prometheus::kNoEpoch, db, type); + void RegisterInputTraffic(const std::string &db, std::string_view type, std::string_view protocol, size_t bytes) { + setNetMetricValue(inputTraffic_, bytes, prometheus::kNoEpoch, db, type, protocol); + } + void RegisterOutputTraffic(const std::string &db, std::string_view type, std::string_view 
protocol, size_t bytes) { + setNetMetricValue(outputTraffic_, bytes, prometheus::kNoEpoch, db, type, protocol); } void RegisterStorageStatus(const std::string &db, const std::string &ns, bool isOK) { setMetricValue(storageStatus_, isOK ? 1.0 : 0.0, prometheus::kNoEpoch, db, ns); @@ -51,9 +53,11 @@ class Prometheus { void NextEpoch(); private: - static void setMetricValue(PFamily *metricFamily, double value, int64_t epoch, const std::string &db = "", - const std::string &ns = "", std::string_view queryType = ""); - static void setMetricValue(PFamily *metricFamily, double value, int64_t epoch, const std::string &db, std::string_view type); + static void setMetricValue(PFamily *metricFamily, double value, int64_t epoch); + static void setMetricValue(PFamily *metricFamily, double value, int64_t epoch, const std::string &db, const std::string &ns, + std::string_view queryType = ""); + static void setNetMetricValue(PFamily *metricFamily, double value, int64_t epoch, const std::string &db, std::string_view type, + std::string_view protocol); void fillRxInfo(); int collect(http::Context &ctx); diff --git a/cpp_src/server/statscollect/statscollector.cc b/cpp_src/server/statscollect/statscollector.cc index f241bb9f6..55b2e0127 100644 --- a/cpp_src/server/statscollect/statscollector.cc +++ b/cpp_src/server/statscollect/statscollector.cc @@ -76,31 +76,34 @@ StatsWatcherSuspend StatsCollector::SuspendStatsThread() { return StatsWatcherSuspend(std::move(lck), *this, false); } -void StatsCollector::OnInputTraffic(const std::string& db, std::string_view source, size_t bytes) noexcept { +void StatsCollector::OnInputTraffic(const std::string& db, std::string_view source, std::string_view protocol, size_t bytes) noexcept { if (prometheus_ && enabled_.load(std::memory_order_acquire)) { std::lock_guard lck(countersMtx_); - getCounters(db, source).inputTraffic += bytes; + getCounters(db, source, protocol).inputTraffic += bytes; } } -void StatsCollector::OnOutputTraffic(const std::string& db, std::string_view source, size_t bytes) noexcept { +void StatsCollector::OnOutputTraffic(const std::string& db, std::string_view source, std::string_view protocol, size_t bytes) noexcept { if (prometheus_ && enabled_.load(std::memory_order_acquire)) { std::lock_guard lck(countersMtx_); - getCounters(db, source).outputTraffic += bytes; + getCounters(db, source, protocol).outputTraffic += bytes; } } -void StatsCollector::OnClientConnected(const std::string& db, std::string_view source) noexcept { +void StatsCollector::OnClientConnected(const std::string& db, std::string_view source, std::string_view protocol) noexcept { if (prometheus_ && enabled_.load(std::memory_order_acquire)) { std::lock_guard lck(countersMtx_); - ++(getCounters(db, source).clients); + ++(getCounters(db, source, protocol).clients); } } -void StatsCollector::OnClientDisconnected(const std::string& db, std::string_view source) noexcept { +void StatsCollector::OnClientDisconnected(const std::string& db, std::string_view source, std::string_view protocol) noexcept { if (prometheus_ && enabled_.load(std::memory_order_acquire)) { std::lock_guard lck(countersMtx_); - --(getCounters(db, source).clients); + auto& counters = getCounters(db, source, protocol); + if (counters.clients) { + --counters.clients; + } } } @@ -151,9 +154,10 @@ void StatsCollector::collectStats(DBManager& dbMngr) { constexpr static auto kPerfstatsNs = "#perfstats"sv; constexpr static auto kMemstatsNs = "#memstats"sv; + static const auto kPerfstatsQuery = Query(std::string(kPerfstatsNs)); 
QueryResults qr; - status = db->Select(Query(std::string(kPerfstatsNs)), qr); - if (status.ok() && qr.Count()) { + status = db->Select(kPerfstatsQuery, qr); + if (status.ok()) { for (auto it = qr.begin(); it != qr.end(); ++it) { auto item = it.GetItem(false); std::string nsName = item["name"].As(); @@ -171,8 +175,9 @@ void StatsCollector::collectStats(DBManager& dbMngr) { } qr.Clear(); - status = db->Select(Query(std::string(kMemstatsNs)), qr); - if (status.ok() && qr.Count()) { + static const auto kMemstatsQuery = Query(std::string(kMemstatsNs)); + status = db->Select(kMemstatsQuery, qr); + if (status.ok()) { for (auto it = qr.begin(); it != qr.end(); ++it) { auto item = it.GetItem(false); auto nsName = item["name"].As(); @@ -207,12 +212,12 @@ void StatsCollector::collectStats(DBManager& dbMngr) { { std::lock_guard lck(countersMtx_); for (const auto& dbCounters : counters_) { - for (const auto& counter : dbCounters.second) { - if (std::string_view(dbCounters.first) == "rpc"sv) { - prometheus_->RegisterRPCClients(counter.first, counter.second.clients); + for (const auto& counter : dbCounters.counters) { + if (std::string_view(dbCounters.source) == "rpc"sv) { + prometheus_->RegisterRPCClients(counter.first, dbCounters.protocol, counter.second.clients); } - prometheus_->RegisterInputTraffic(counter.first, dbCounters.first, counter.second.inputTraffic); - prometheus_->RegisterOutputTraffic(counter.first, dbCounters.first, counter.second.outputTraffic); + prometheus_->RegisterInputTraffic(counter.first, dbCounters.source, dbCounters.protocol, counter.second.inputTraffic); + prometheus_->RegisterOutputTraffic(counter.first, dbCounters.source, dbCounters.protocol, counter.second.outputTraffic); } } } @@ -220,15 +225,15 @@ void StatsCollector::collectStats(DBManager& dbMngr) { prometheus_->NextEpoch(); } -StatsCollector::DBCounters& StatsCollector::getCounters(const std::string& db, std::string_view source) { +StatsCollector::DBCounters& StatsCollector::getCounters(const std::string& db, std::string_view source, std::string_view protocol) { for (auto& el : counters_) { - if (std::string_view(el.first) == source) { - return el.second[db]; + if (std::string_view(el.source) == source && std::string_view(el.protocol) == protocol) { + return el.counters[db]; } } - counters_.emplace_back(std::string(source), CountersByDB()); - auto& sourceMap = counters_.back().second; - return sourceMap[db]; + return counters_ + .emplace_back(SourceCounters{.source = std::string(source), .protocol = std::string(protocol), .counters = CountersByDB()}) + .counters[db]; } } // namespace reindexer_server diff --git a/cpp_src/server/statscollect/statscollector.h b/cpp_src/server/statscollect/statscollector.h index 11eddc6ba..f30273946 100644 --- a/cpp_src/server/statscollect/statscollector.h +++ b/cpp_src/server/statscollect/statscollector.h @@ -15,7 +15,7 @@ namespace reindexer_server { class DBManager; class Prometheus; -class StatsCollector : public IStatsWatcher, public IStatsStarter { +class StatsCollector final : public IStatsWatcher, public IStatsStarter { public: StatsCollector(DBManager& dbMngr, Prometheus* prometheus, std::chrono::milliseconds collectPeriod, LoggerWrapper logger) : dbMngr_(dbMngr), @@ -26,14 +26,14 @@ class StatsCollector : public IStatsWatcher, public IStatsStarter { logger_(std::move(logger)) {} ~StatsCollector() override { Stop(); } void Start(); - void Restart(std::unique_lock&& lck) noexcept override final; + void Restart(std::unique_lock&& lck) noexcept override; void Stop(); - 
[[nodiscard]] StatsWatcherSuspend SuspendStatsThread() override final; - void OnInputTraffic(const std::string& db, std::string_view source, size_t bytes) noexcept override final; - void OnOutputTraffic(const std::string& db, std::string_view source, size_t bytes) noexcept override final; - void OnClientConnected(const std::string& db, std::string_view source) noexcept override final; - void OnClientDisconnected(const std::string& db, std::string_view source) noexcept override final; + [[nodiscard]] StatsWatcherSuspend SuspendStatsThread() override; + void OnInputTraffic(const std::string& db, std::string_view source, std::string_view protocol, size_t bytes) noexcept override; + void OnOutputTraffic(const std::string& db, std::string_view source, std::string_view protocol, size_t bytes) noexcept override; + void OnClientConnected(const std::string& db, std::string_view source, std::string_view protocol) noexcept override; + void OnClientDisconnected(const std::string& db, std::string_view source, std::string_view protocol) noexcept override; private: void startImpl(); @@ -45,11 +45,17 @@ class StatsCollector : public IStatsWatcher, public IStatsStarter { uint64_t inputTraffic{0}; uint64_t outputTraffic{0}; }; + using CountersByDB = std::unordered_map; - using Counters = std::vector>; + struct SourceCounters { + std::string source; + std::string protocol; + CountersByDB counters; + }; + using Counters = std::vector; void collectStats(DBManager& dbMngr); - DBCounters& getCounters(const std::string& db, std::string_view source); + DBCounters& getCounters(const std::string& db, std::string_view source, std::string_view protocol); DBManager& dbMngr_; Prometheus* prometheus_; diff --git a/cpp_src/tools/assertrx.cc b/cpp_src/tools/assertrx.cc index ba6b640ad..d90c72058 100644 --- a/cpp_src/tools/assertrx.cc +++ b/cpp_src/tools/assertrx.cc @@ -9,7 +9,7 @@ namespace reindexer { -void fail_assertrx(const char *assertion, const char *file, unsigned line, const char *function) { +void fail_assertrx(const char *assertion, const char *file, unsigned line, const char *function) noexcept { std::cerr << fmt::sprintf("Assertion failed: %s (%s:%u: %s)\n", assertion, file, line, function); debug::print_crash_query(std::cerr); std::abort(); diff --git a/cpp_src/tools/assertrx.h b/cpp_src/tools/assertrx.h index 35fe5e33c..32233c3fc 100644 --- a/cpp_src/tools/assertrx.h +++ b/cpp_src/tools/assertrx.h @@ -7,9 +7,12 @@ namespace reindexer { #ifdef NDEBUG #define assertrx(e) ((void)0) #define assertrx_throw(e) ((void)0) +#define assertrx_dbg(e) ((void)0) #else // !NDEBUG -[[noreturn]] void fail_assertrx(const char *assertion, const char *file, unsigned line, const char *function); +// fail_assertrx can actually throw, but this exception can not be handled properly, +// so it was marked as 'noexcept' for the optimization purposes +[[noreturn]] void fail_assertrx(const char *assertion, const char *file, unsigned line, const char *function) noexcept; [[noreturn]] void fail_throwrx(const char *assertion, const char *file, unsigned line, const char *function); #ifdef __cplusplus @@ -18,6 +21,17 @@ namespace reindexer { (rx_likely(static_cast(expr)) ? void(0) : reindexer::fail_throwrx(#expr, __FILE__, __LINE__, __FUNCTION__)) #endif // __cplusplus +#ifndef RX_WITH_STDLIB_DEBUG +#define assertrx_dbg(e) ((void)0) +#else // RX_WITH_STDLIB_DEBUG + +#ifdef __cplusplus +// Macro for the extra debug. 
Works only when RX_WITH_STDLIB_DEBUG is defined and NDEBUG is not defined +#define assertrx_dbg(expr) assertrx(expr) +#endif // __cplusplus + +#endif // !RX_WITH_STDLIB_DEBUG + #endif // NDEBUG } // namespace reindexer diff --git a/cpp_src/tools/customlocal.cc b/cpp_src/tools/customlocal.cc index 9a6fc1661..f9d51b322 100644 --- a/cpp_src/tools/customlocal.cc +++ b/cpp_src/tools/customlocal.cc @@ -12,16 +12,17 @@ constexpr std::pair kAlphabet[] = { {0x006F, 0x004F}, {0x0070, 0x0050}, {0x0071, 0x0051}, {0x0072, 0x0052}, {0x0073, 0x0053}, {0x0074, 0x0054}, {0x0075, 0x0055}, {0x0076, 0x0056}, {0x0077, 0x0057}, {0x0078, 0x0058}, {0x0079, 0x0059}, {0x007A, 0x005A}, //Latin-1 Supplement - {0x00E0, 0x00C0}, {0x00E1, 0x00C1}, {0x00E2, 0x00C2}, {0x00E3, 0x00C3}, {0x00E4, 0x00C4}, {0x00E5, 0x00C5}, {0x00E6, 0x00C6}, + {0x00DF,0x00DF}, + {0x00E0, 0x00C0}, {0x00E1, 0x00C1}, {0x00E2, 0x00C2}, {0x00E3, 0x00C3}, {0x00E4, 0x00C4}, {0x00E5, 0x00C5}, {0x00E6, 0x00C6}, {0x00E7, 0x00C7}, {0x00E8, 0x00C8}, {0x00E9, 0x00C9}, {0x00EA, 0x00CA}, {0x00EB, 0x00CB}, {0x00EC, 0x00CC}, {0x00ED, 0x00CD}, {0x00EE, 0x00CE}, {0x00EF, 0x00CF}, {0x00F0, 0x00D0}, {0x00F1, 0x00D1}, {0x00F2, 0x00D2}, {0x00F3, 0x00D3}, {0x00F4, 0x00D4}, {0x00F5, 0x00D5}, {0x00F6, 0x00D6}, {0x00F8, 0x00D8}, {0x00F9, 0x00D9}, {0x00FA, 0x00DA}, {0x00FB, 0x00DB}, {0x00FC, 0x00DC}, - {0x00FD, 0x00DD}, {0x00FE, 0x00DE}, {0x00FF, 0x0178}, + {0x00FD, 0x00DD}, {0x00FE, 0x00DE}, {0x00FF, 0x0178}, // Latin Extended-A {0x0101, 0x0100}, {0x0103, 0x0102}, {0x0105, 0x0104}, {0x0107, 0x0106}, {0x0109, 0x0108}, {0x010B, 0x010A}, {0x010D, 0x010C}, {0x010F, 0x010E}, {0x0111, 0x0110}, {0x0113, 0x0112}, {0x0115, 0x0114}, {0x0117, 0x0116}, {0x0119, 0x0118}, {0x011B, 0x011A}, {0x011D, 0x011C}, {0x011F, 0x011E}, {0x0121, 0x0120}, {0x0123, 0x0122}, {0x0125, 0x0124}, {0x0127, 0x0126}, {0x0129, 0x0128}, - {0x012B, 0x012A}, {0x012D, 0x012C}, {0x012F, 0x012E}, {0x0131, 0x0049}, {0x0133, 0x0132}, {0x0135, 0x0134}, {0x0137, 0x0136}, + {0x012B, 0x012A}, {0x012D, 0x012C}, {0x012F, 0x012E}, {0x0131, 0x0131}, {0x0133, 0x0132}, {0x0135, 0x0134}, {0x0137, 0x0136}, {0x013A, 0x0139}, {0x013C, 0x013B}, {0x013E, 0x013D}, {0x0140, 0x013F}, {0x0142, 0x0141}, {0x0144, 0x0143}, {0x0146, 0x0145}, {0x0148, 0x0147}, {0x014B, 0x014A}, {0x014D, 0x014C}, {0x014F, 0x014E}, {0x0151, 0x0150}, {0x0153, 0x0152}, {0x0155, 0x0154}, {0x0157, 0x0156}, {0x0159, 0x0158}, {0x015B, 0x015A}, {0x015D, 0x015C}, {0x015F, 0x015E}, {0x0161, 0x0160}, {0x0163, 0x0162}, @@ -191,7 +192,25 @@ constexpr int checkAlphabetSorted() { } return -1; } + +constexpr bool checkAlphabetLowUpperUtf8SizeEquals() { + for (int i = 0; i < std::end(kAlphabet) - std::begin(kAlphabet); i++) { + if (kAlphabet[i].first < 0x80 && kAlphabet[i].second < 0x80) { + continue; + } else if ((kAlphabet[i].first >= 0x80 && kAlphabet[i].first < 0x800) && + (kAlphabet[i].second >= 0x80 && kAlphabet[i].second < 0x800)) { + continue; + } else if (kAlphabet[i].first >= 0x800 && kAlphabet[i].second >= 0x800) { + continue; + } else { + return false; + } + } + return true; +} + static_assert(checkAlphabetSorted() == -1, "Alphabet must be sorted"); +static_assert(checkAlphabetLowUpperUtf8SizeEquals(), "The length of utf8 capital and small letters must be the same"); class CustomLocale { public: @@ -219,6 +238,7 @@ class CustomLocale { } } } + wchar_t ToLower(wchar_t ch) const noexcept { uint32_t ofs = ch; if (ofs < UINT16_MAX) { @@ -257,5 +277,4 @@ void ToLower(std::wstring& data) noexcept { kCustomLocale.ToLower(data); } wchar_t ToLower(wchar_t ch) noexcept 
{ return kCustomLocale.ToLower(ch); } bool IsAlpha(wchar_t ch) noexcept { return kCustomLocale.IsAlpha(ch); } - } // namespace reindexer diff --git a/cpp_src/tools/errors.h b/cpp_src/tools/errors.h index 7ff62211b..a33e3f351 100644 --- a/cpp_src/tools/errors.h +++ b/cpp_src/tools/errors.h @@ -110,11 +110,11 @@ void assertf_fmt(const char *fmt, const Args &...args) { fmt::fprintf(std::cerr, fmt, args...); } -#define assertf(e, fmt, ...) \ - if (!(e)) { \ - assertf_fmt("%s:%d: failed assertion '%s':\n" fmt, __FILE__, __LINE__, #e, __VA_ARGS__); \ - debug::print_crash_query(std::cerr); \ - abort(); \ +#define assertf(e, fmt, ...) \ + if rx_unlikely (!(e)) { \ + reindexer::assertf_fmt("%s:%d: failed assertion '%s':\n" fmt, __FILE__, __LINE__, #e, __VA_ARGS__); \ + reindexer::debug::print_crash_query(std::cerr); \ + abort(); \ } #endif // NDEBUG #endif // REINDEX_CORE_BUILD diff --git a/cpp_src/tools/fsops.cc b/cpp_src/tools/fsops.cc index 8d0af2f76..e7d8db748 100644 --- a/cpp_src/tools/fsops.cc +++ b/cpp_src/tools/fsops.cc @@ -39,7 +39,36 @@ int RmDirAll(const std::string &path) noexcept { return nftw( path.c_str(), [](const char *fpath, const struct stat *, int, struct FTW *) { return ::remove(fpath); }, 64, FTW_DEPTH | FTW_PHYS); #else - (void)path; + WIN32_FIND_DATA entry; + if (HANDLE hFind = FindFirstFile((path + "/*.*").c_str(), &entry); hFind != INVALID_HANDLE_VALUE) { + std::string dirPath; + do { + if (strncmp(entry.cFileName, ".", 2) == 0 || strncmp(entry.cFileName, "..", 3) == 0) { + continue; + } + const bool isDir = entry.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY; + dirPath.clear(); + dirPath.append(path).append("/").append(entry.cFileName); + if (isDir) { + if (int ret = RmDirAll(dirPath); ret < 0) { + FindClose(hFind); + return ret; + } + } else { + if (!DeleteFile(dirPath.c_str())) { + FindClose(hFind); + fprintf(stderr, "Unable to remove file '%s'\n", dirPath.c_str()); + return -1; + } + } + } while (FindNextFile(hFind, &entry)); + FindClose(hFind); + if (!RemoveDirectory(path.c_str())) { + fprintf(stderr, "Unable to remove directory '%s'\n", path.c_str()); + return -1; + } + } + return 0; #endif } diff --git a/cpp_src/tools/json2kv.h b/cpp_src/tools/json2kv.h index ccdc5bd66..c5e168f50 100644 --- a/cpp_src/tools/json2kv.h +++ b/cpp_src/tools/json2kv.h @@ -6,6 +6,6 @@ namespace reindexer { -Variant jsonValue2Variant(const gason::JsonValue& v, KeyValueType t, std::string_view fieldName = std::string_view()); // TODO may be ""sv +Variant jsonValue2Variant(const gason::JsonValue& v, KeyValueType t, std::string_view fieldName = std::string_view()); } // namespace reindexer diff --git a/cpp_src/tools/jsonstring.h b/cpp_src/tools/jsonstring.h index 7bf09f2af..34c6eeb56 100644 --- a/cpp_src/tools/jsonstring.h +++ b/cpp_src/tools/jsonstring.h @@ -42,12 +42,14 @@ inline std::string_view to_string_view(const uint8_t *p) noexcept { const auto len = length(p); uintptr_t uptr; static_assert(sizeof(uintptr_t) == 8 || sizeof(uintptr_t) == 4, "Expecting sizeof uintptr to be equal 4 or 8 bytes"); - if constexpr (sizeof(uintptr_t) == 8) { - uptr = uintptr_t(p[-2]) | (uintptr_t(p[-3]) << 8) | (uintptr_t(p[-4]) << 16) | (uintptr_t(p[-5]) << 24) | - (uintptr_t(p[-6]) << 32) | (uintptr_t(p[-7]) << 40) | (uintptr_t(p[-8]) << 48) | (uintptr_t(p[-9]) << 56); - } else { - uptr = uintptr_t(p[-2]) | (uintptr_t(p[-3]) << 8) | (uintptr_t(p[-4]) << 16) | (uintptr_t(p[-5]) << 24); - } +#if UINTPTR_MAX == 0xFFFFFFFF + uptr = uintptr_t(p[-2]) | (uintptr_t(p[-3]) << 8) | (uintptr_t(p[-4]) << 16) | 
(uintptr_t(p[-5]) << 24); +#elif UINTPTR_MAX == 0xFFFFFFFFFFFFFFFF + uptr = uintptr_t(p[-2]) | (uintptr_t(p[-3]) << 8) | (uintptr_t(p[-4]) << 16) | (uintptr_t(p[-5]) << 24) | (uintptr_t(p[-6]) << 32) | + (uintptr_t(p[-7]) << 40) | (uintptr_t(p[-8]) << 48) | (uintptr_t(p[-9]) << 56); +#else + static_assert(false, "Unexpected uintptr_t size"); +#endif return std::string_view(reinterpret_cast(uptr), len); } const auto len = length(p); diff --git a/cpp_src/tools/logger.cc b/cpp_src/tools/logger.cc index 4c0700d78..f4936ab86 100644 --- a/cpp_src/tools/logger.cc +++ b/cpp_src/tools/logger.cc @@ -45,11 +45,6 @@ void logInstallWriter(LogWriter writer, LoggerPolicy policy) { #if defined(REINDEX_WITH_ASAN) || defined(REINDEX_WITH_TSAN) || defined(RX_WITH_STDLIB_DEBUG) std::abort(); #else - lck.unlock(); - if (writer) { - errorText.append(". THIS logger is not active"); - writer(LogError, &errorText[0]); - } return; #endif } diff --git a/cpp_src/tools/oscompat.h b/cpp_src/tools/oscompat.h index ef3ac33b2..ddba84e04 100644 --- a/cpp_src/tools/oscompat.h +++ b/cpp_src/tools/oscompat.h @@ -6,7 +6,7 @@ #define WIN32_LEAN_AND_MEAN #endif -//#define _CRT_SECURE_NO_WARNINGS +// #define _CRT_SECURE_NO_WARNINGS #ifndef NOMINMAX #define NOMINMAX @@ -40,6 +40,7 @@ #include #include #include +#include #include #endif diff --git a/cpp_src/tools/serializer.h b/cpp_src/tools/serializer.h index 91a155751..63d31ecd2 100644 --- a/cpp_src/tools/serializer.h +++ b/cpp_src/tools/serializer.h @@ -149,11 +149,10 @@ class Serializer { class WrSerializer { public: - WrSerializer() noexcept : buf_(inBuf_), len_(0), cap_(sizeof(inBuf_)) {} // -V730 + WrSerializer() noexcept : buf_(inBuf_), len_(0), cap_(sizeof(inBuf_)) {} template - WrSerializer(uint8_t (&buf)[N]) : buf_(buf), len_(0), cap_(N), hasExternalBuf_(true) {} // -V730 - WrSerializer(chunk &&ch) noexcept // -V730 - : buf_(ch.data_), len_(ch.len_), cap_(ch.cap_) { + WrSerializer(uint8_t (&buf)[N]) noexcept : buf_(buf), len_(0), cap_(N), hasExternalBuf_(true) {} + WrSerializer(chunk &&ch) noexcept : buf_(ch.data_), len_(ch.len_), cap_(ch.cap_) { if (!buf_) { buf_ = inBuf_; cap_ = sizeof(inBuf_); @@ -165,8 +164,7 @@ class WrSerializer { ch.offset_ = 0; } WrSerializer(const WrSerializer &) = delete; - WrSerializer(WrSerializer &&other) noexcept // -V730 - : len_(other.len_), cap_(other.cap_), hasExternalBuf_(other.hasExternalBuf_) { + WrSerializer(WrSerializer &&other) noexcept : len_(other.len_), cap_(other.cap_), hasExternalBuf_(other.hasExternalBuf_) { if (other.buf_ == other.inBuf_) { buf_ = inBuf_; memcpy(buf_, other.buf_, other.len_ * sizeof(other.inBuf_[0])); @@ -210,7 +208,6 @@ class WrSerializer { bool HasAllocatedBuffer() const noexcept { return buf_ != inBuf_ && !hasExternalBuf_; } void PutKeyValueType(KeyValueType t) { PutVarUint(t.toNumber()); } - // Put variant void PutVariant(const Variant &kv) { PutKeyValueType(kv.Type()); kv.Type().EvaluateOneOf( @@ -222,19 +219,17 @@ class WrSerializer { } }, [&](OneOf) { putRawVariant(kv); }); - } - -private: - void putRawVariant(const Variant &kv) { - kv.Type().EvaluateOneOf([&](KeyValueType::Bool) { PutBool(bool(kv)); }, [&](KeyValueType::Int64) { PutVarint(int64_t(kv)); }, - [&](KeyValueType::Int) { PutVarint(int(kv)); }, [&](KeyValueType::Double) { PutDouble(double(kv)); }, - [&](KeyValueType::String) { PutVString(std::string_view(kv)); }, [&](KeyValueType::Null) noexcept {}, - [&](KeyValueType::Uuid) { PutUuid(Uuid{kv}); }, - [&](OneOf) { - fprintf(stderr, "Unknown keyType %s\n", kv.Type().Name().data()); - 
abort(); - }); + KeyValueType::Composite, KeyValueType::Undefined, KeyValueType::Null, KeyValueType::Uuid>) { + kv.Type().EvaluateOneOf( + [&](KeyValueType::Bool) { PutBool(bool(kv)); }, [&](KeyValueType::Int64) { PutVarint(int64_t(kv)); }, + [&](KeyValueType::Int) { PutVarint(int(kv)); }, [&](KeyValueType::Double) { PutDouble(double(kv)); }, + [&](KeyValueType::String) { PutVString(std::string_view(kv)); }, [&](KeyValueType::Null) noexcept {}, + [&](KeyValueType::Uuid) { PutUuid(Uuid{kv}); }, + [&](OneOf) { + fprintf(stderr, "Unknown keyType %s\n", kv.Type().Name().data()); + abort(); + }); + }); } public: diff --git a/cpp_src/tools/stringstools.cc b/cpp_src/tools/stringstools.cc index ab9cfa83d..0634f1227 100644 --- a/cpp_src/tools/stringstools.cc +++ b/cpp_src/tools/stringstools.cc @@ -147,7 +147,7 @@ bool is_number(std::string_view str) { } void split(std::string_view str, std::string &buf, std::vector &words, const std::string &extraWordSymbols) { - //assuming that the 'ToLower' function and the 'check for replacement' function should not change the character size in bytes + // assuming that the 'ToLower' function and the 'check for replacement' function should not change the character size in bytes buf.resize(str.length()); words.resize(0); auto bufIt = buf.begin(); @@ -185,29 +185,29 @@ Pos wordToByteAndCharPos(std::string_view str, int wordPosition, const std::stri auto wordEndIt = str.begin(); auto it = str.begin(); Pos wp; - const bool constexpr neadChar = std::is_same_v; - if constexpr (neadChar) { + const bool constexpr needChar = std::is_same_v; + if constexpr (needChar) { wp.start.ch = -1; } for (; it != str.end();) { auto ch = utf8::unchecked::next(it); - if constexpr (neadChar) { + if constexpr (needChar) { wp.start.ch++; } // skip not word symbols while (it != str.end() && extraWordSymbols.find(ch) == std::string::npos && !IsAlpha(ch) && !IsDigit(ch)) { wordStartIt = it; ch = utf8::unchecked::next(it); - if constexpr (neadChar) { + if constexpr (needChar) { wp.start.ch++; } } - if constexpr (neadChar) { + if constexpr (needChar) { wp.end.ch = wp.start.ch; } while (IsAlpha(ch) || IsDigit(ch) || extraWordSymbols.find(ch) != std::string::npos) { wordEndIt = it; - if constexpr (neadChar) { + if constexpr (needChar) { wp.end.ch++; } if (it == str.end()) { @@ -224,7 +224,7 @@ Pos wordToByteAndCharPos(std::string_view str, int wordPosition, const std::stri wordStartIt = it; } } - if constexpr (neadChar) { + if constexpr (needChar) { wp.start.ch = wp.end.ch; } } @@ -609,14 +609,27 @@ int getUTF8StringCharactersCount(std::string_view str) noexcept { int stoi(std::string_view sl) { bool valid; - return jsteemann::atoi(sl.data(), sl.data() + sl.size(), valid); + const int res = jsteemann::atoi(sl.data(), sl.data() + sl.size(), valid); + if (!valid) { + throw Error(errParams, "Can't convert '%s' to number", sl); + } + return res; +} + +std::optional try_stoi(std::string_view sl) { + bool valid; + const int res = jsteemann::atoi(sl.data(), sl.data() + sl.size(), valid); + if (!valid) { + return std::nullopt; + } + return res; } int64_t stoll(std::string_view sl) { bool valid; auto ret = jsteemann::atoi(sl.data(), sl.data() + sl.size(), valid); if (!valid) { - throw Error(errParams, "Can't convert %s to number", sl); + throw Error(errParams, "Can't convert '%s' to number", sl); } return ret; } diff --git a/cpp_src/tools/stringstools.h b/cpp_src/tools/stringstools.h index 80731c581..ed0e1a41b 100644 --- a/cpp_src/tools/stringstools.h +++ b/cpp_src/tools/stringstools.h @@ -3,6 +3,7 @@ 
#include #include #include +#include #include #include #include @@ -27,6 +28,12 @@ inline std::string_view skipSpace(std::string_view str) { return str.substr(i); } +template +bool strEmpty(const Str& str) noexcept { + return str.empty(); +} +inline bool strEmpty(const char* str) noexcept { return str[0] == '\0'; } + template Container& split(const typename Container::value_type& str, std::string_view delimiters, bool trimEmpty, Container& tokens) { tokens.resize(0); @@ -136,11 +143,12 @@ int fast_strftime(char* buf, const tm* tm); std::string urldecode2(std::string_view str); int stoi(std::string_view sl); +std::optional try_stoi(std::string_view sl); int64_t stoll(std::string_view sl); bool validateObjectName(std::string_view name, bool allowSpecialChars) noexcept; bool validateUserNsName(std::string_view name) noexcept; -RX_ALWAYS_INLINE bool isSystemNamespaceNameFast(std::string_view name) noexcept { return name.size() && name[0] == '#'; } +RX_ALWAYS_INLINE bool isSystemNamespaceNameFast(std::string_view name) noexcept { return !name.empty() && name[0] == '#'; } LogLevel logLevelFromString(const std::string& strLogLevel); StrictMode strictModeFromString(const std::string& strStrictMode); std::string_view strictModeToString(StrictMode mode); @@ -168,6 +176,9 @@ bool checkIfStartsWith(std::string_view pattern, std::string_view src) noexcept; RX_ALWAYS_INLINE bool checkIfStartsWith(std::string_view pattern, std::string_view src) noexcept { return checkIfStartsWith(pattern, src); } +RX_ALWAYS_INLINE bool checkIfStartsWithCS(std::string_view pattern, std::string_view src) noexcept { + return checkIfStartsWith(pattern, src); +} template bool checkIfEndsWith(std::string_view pattern, std::string_view src) noexcept; @@ -232,9 +243,29 @@ struct hash_str { size_t operator()(const std::string& hs) const noexcept { return collateHash(hs); } }; -inline void deepCopy(std::string& dst, const std::string& src) { +RX_ALWAYS_INLINE void deepCopy(std::string& dst, const std::string& src) { dst.resize(src.size()); std::memcpy(&dst[0], &src[0], src.size()); } +constexpr size_t kTmpNsPostfixLen = 20; +constexpr std::string_view kTmpNsSuffix = "_tmp_"; +constexpr char kTmpNsPrefix = '@'; +RX_ALWAYS_INLINE bool isTmpNamespaceNameFast(std::string_view name) noexcept { return !name.empty() && name[0] == kTmpNsPrefix; } +[[nodiscard]] inline std::string createTmpNamespaceName(std::string_view baseName) { + return std::string({kTmpNsPrefix}).append(baseName).append(kTmpNsSuffix).append(randStringAlph(kTmpNsPostfixLen)); +} +[[nodiscard]] inline std::string_view demangleTmpNamespaceName(std::string_view tmpNsName) noexcept { + if (tmpNsName.size() < kTmpNsPostfixLen + kTmpNsSuffix.size() + 1) { + return tmpNsName; + } + if (tmpNsName[0] != kTmpNsPrefix) { + return tmpNsName; + } + if (tmpNsName.substr(tmpNsName.size() - kTmpNsPostfixLen - kTmpNsSuffix.size(), kTmpNsSuffix.size()) != kTmpNsSuffix) { + return tmpNsName; + } + return tmpNsName.substr(1, tmpNsName.size() - kTmpNsPostfixLen - 1 - kTmpNsSuffix.size()); +} + } // namespace reindexer diff --git a/cpp_src/tools/varint.h b/cpp_src/tools/varint.h index a61baf2bc..028c5fdc5 100644 --- a/cpp_src/tools/varint.h +++ b/cpp_src/tools/varint.h @@ -8,8 +8,8 @@ #pragma warning(disable : 4267 4146) #endif -#include -#include +#include +#include /** * Return the ZigZag-encoded 32-bit unsigned integer form of a 32-bit signed @@ -20,7 +20,7 @@ * \return * ZigZag encoded integer. 
*/ -static inline uint32_t zigzag32(int32_t v) noexcept { +inline uint32_t zigzag32(int32_t v) noexcept { if (v < 0) return (-(uint32_t)v) * 2 - 1; else @@ -36,7 +36,7 @@ static inline uint32_t zigzag32(int32_t v) noexcept { * \return * ZigZag encoded integer. */ -static inline uint64_t zigzag64(int64_t v) noexcept { +inline uint64_t zigzag64(int64_t v) noexcept { if (v < 0) return (-(uint64_t)v) * 2 - 1; else @@ -54,7 +54,7 @@ static inline uint64_t zigzag64(int64_t v) noexcept { * \return * Number of bytes written to `out`. */ -static inline size_t uint32_pack(uint32_t value, uint8_t *out) noexcept { +inline size_t uint32_pack(uint32_t value, uint8_t *out) noexcept { unsigned rv = 0; if (value >= 0x80) { @@ -89,7 +89,7 @@ static inline size_t uint32_pack(uint32_t value, uint8_t *out) noexcept { * \return * Number of bytes written to `out`. */ -static inline size_t int32_pack(int32_t value, uint8_t *out) noexcept { +inline size_t int32_pack(int32_t value, uint8_t *out) noexcept { if (value < 0) { out[0] = value | 0x80; out[1] = (value >> 7) | 0x80; @@ -115,7 +115,7 @@ static inline size_t int32_pack(int32_t value, uint8_t *out) noexcept { * \return * Number of bytes written to `out`. */ -static inline size_t sint32_pack(int32_t value, uint8_t *out) noexcept { return uint32_pack(zigzag32(value), out); } +inline size_t sint32_pack(int32_t value, uint8_t *out) noexcept { return uint32_pack(zigzag32(value), out); } /** * Pack a 64-bit unsigned integer using base-128 varint encoding and return the @@ -128,7 +128,7 @@ static inline size_t sint32_pack(int32_t value, uint8_t *out) noexcept { return * \return * Number of bytes written to `out`. */ -static size_t uint64_pack(uint64_t value, uint8_t *out) noexcept { +inline size_t uint64_pack(uint64_t value, uint8_t *out) noexcept { uint32_t hi = (uint32_t)(value >> 32); uint32_t lo = (uint32_t)value; unsigned rv; @@ -165,15 +165,15 @@ static size_t uint64_pack(uint64_t value, uint8_t *out) noexcept { * \return * Number of bytes written to `out`. */ -static inline size_t sint64_pack(int64_t value, uint8_t *out) noexcept { return uint64_pack(zigzag64(value), out); } +inline size_t sint64_pack(int64_t value, uint8_t *out) noexcept { return uint64_pack(zigzag64(value), out); } -static inline size_t boolean_pack(bool value, uint8_t *out) noexcept { +inline size_t boolean_pack(bool value, uint8_t *out) noexcept { *out = value ? 
1 : 0; return 1; } -static inline size_t string_pack(const char *str, uint8_t *out) noexcept { - if (str == NULL) { +inline size_t string_pack(const char *str, uint8_t *out) noexcept { + if (str == nullptr) { out[0] = 0; return 1; } else { @@ -184,8 +184,8 @@ static inline size_t string_pack(const char *str, uint8_t *out) noexcept { } } -static inline size_t string_pack(const char *str, const size_t len, uint8_t *out) noexcept { - if (str == NULL) { +inline size_t string_pack(const char *str, const size_t len, uint8_t *out) noexcept { + if (str == nullptr) { out[0] = 0; return 1; } else { @@ -195,7 +195,7 @@ static inline size_t string_pack(const char *str, const size_t len, uint8_t *out } } -static inline uint32_t parse_uint32(unsigned len, const uint8_t *data) noexcept { +inline uint32_t parse_uint32(unsigned len, const uint8_t *data) noexcept { uint32_t rv = data[0] & 0x7f; if (len > 1) { rv |= ((uint32_t)(data[1] & 0x7f) << 7); @@ -210,22 +210,22 @@ static inline uint32_t parse_uint32(unsigned len, const uint8_t *data) noexcept return rv; } -static inline uint32_t parse_int32(unsigned len, const uint8_t *data) noexcept { return parse_uint32(len, data); } +inline uint32_t parse_int32(unsigned len, const uint8_t *data) noexcept { return parse_uint32(len, data); } -static inline int32_t unzigzag32(uint32_t v) noexcept { +inline int32_t unzigzag32(uint32_t v) noexcept { if (v & 1) return -(v >> 1) - 1; else return v >> 1; } -static inline uint32_t parse_fixed_uint32(const uint8_t *data) noexcept { +inline uint32_t parse_fixed_uint32(const uint8_t *data) noexcept { uint32_t t; memcpy(&t, data, 4); return t; } -static inline uint64_t parse_uint64(unsigned len, const uint8_t *data) noexcept { +inline uint64_t parse_uint64(unsigned len, const uint8_t *data) noexcept { unsigned shift, i; uint64_t rv; @@ -240,20 +240,20 @@ static inline uint64_t parse_uint64(unsigned len, const uint8_t *data) noexcept return rv; } -static inline int64_t unzigzag64(uint64_t v) noexcept { +inline int64_t unzigzag64(uint64_t v) noexcept { if (v & 1) return -(v >> 1) - 1; else return v >> 1; } -static inline uint64_t parse_fixed_uint64(const uint8_t *data) noexcept { +inline uint64_t parse_fixed_uint64(const uint8_t *data) noexcept { uint64_t t; memcpy(&t, data, 8); return t; } -static inline unsigned scan_varint(unsigned len, const uint8_t *data) noexcept { +inline unsigned scan_varint(unsigned len, const uint8_t *data) noexcept { unsigned i; if (len > 10) len = 10; for (i = 0; i < len; i++) diff --git a/cpp_src/tools/verifying_updater.h b/cpp_src/tools/verifying_updater.h new file mode 100644 index 000000000..8bd0967f3 --- /dev/null +++ b/cpp_src/tools/verifying_updater.h @@ -0,0 +1,30 @@ +#pragma once + +#include + +namespace reindexer { + +template +class VerifyingUpdater { + using BaseType = BT; + using FieldType = FT; + +public: + VerifyingUpdater(BaseType& base) noexcept : base_{base} {} + operator FieldType&() & noexcept { return Get(); } + FieldType& Get() & noexcept { + touched_ = true; + return base_.*field; + } + ~VerifyingUpdater() noexcept(false) { + if (touched_ && std::uncaught_exceptions() == 0) { + (base_.*Verify)(); + } + } + +private: + BaseType& base_; + bool touched_{false}; +}; + +} // namespace reindexer diff --git a/cpp_src/vendor/itoa/itoa.cc b/cpp_src/vendor/itoa/itoa.cc index 7d524293c..4e7547ded 100644 --- a/cpp_src/vendor/itoa/itoa.cc +++ b/cpp_src/vendor/itoa/itoa.cc @@ -1,3 +1,4 @@ +#include "itoa.h" #include static const char gDigitsLut[200] = { @@ -236,7 +237,7 @@ char 
*i64toa(int64_t value, char *buffer) {
 	return u64toa(u, buffer);
 }
 
-char hex_lut[] = {'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D', 'E', 'F'};
+static char hex_lut[] = {'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D', 'E', 'F'};
 
 char *u32toax(uint32_t value, char *buffer, int n) {
 	if (!n) {
diff --git a/describer.go b/describer.go
index 7acebbdd4..ed494bed8 100644
--- a/describer.go
+++ b/describer.go
@@ -384,6 +384,50 @@ type DBProfilingConfig struct {
 	LongQueryLogging *LongQueryLoggingConfig `json:"long_queries_logging,omitempty"`
 }
 
+type NamespaceCacheConfig struct {
+	// Max size of the index IdSets cache in bytes (per index)
+	// Each index has its own independent cache
+	// This cache is used in any selections to store resulting sets of internal document IDs (it does not store the documents' content itself)
+	// Default value is 134217728 (128 MB). Min value is 0
+	IdxIdsetCacheSize uint64 `json:"index_idset_cache_size"`
+	// Default 'hits to cache' for index IdSets caches
+	// This value determines how many requests are required to put results into the cache
+	// For example, with a value of 2: the first request will be executed without caching, the second request will generate a cache entry and put the results into the cache, and the third request will get the cached results
+	// This value may be automatically increased if the cache is invalidated too fast
+	// Default value is 2. Min value is 0
+	IdxIdsetHitsToCache uint32 `json:"index_idset_hits_to_cache"`
+	// Max size of the fulltext indexes IdSets cache in bytes (per index)
+	// Each fulltext index has its own independent cache
+	// This cache is used in any selections to store resulting sets of internal document IDs, FT ranks and highlighted areas (it does not store the documents' content itself)
+	// Default value is 134217728 (128 MB). Min value is 0
+	FTIdxCacheSize uint64 `json:"ft_index_cache_size"`
+	// Default 'hits to cache' for fulltext index IdSets caches
+	// This value determines how many requests are required to put results into the cache
+	// For example, with a value of 2: the first request will be executed without caching, the second request will generate a cache entry and put the results into the cache, and the third request will get the cached results. This value may be automatically increased if the cache is invalidated too fast
+	// Default value is 2. Min value is 0
+	FTIdxHitsToCache uint32 `json:"ft_index_hits_to_cache"`
+	// Max size of the JOINs preselect cache in bytes for each namespace
+	// This cache will be enabled only if the 'join_cache_mode' property is not 'off'
+	// It stores resulting IDs and any other 'preselect' information for the JOIN queries (when the target namespace is the right namespace of the JOIN)
+	// Default value is 268435456 (256 MB). Min value is 0
+	JoinCacheSize uint64 `json:"joins_preselect_cache_size"`
+	// Default 'hits to cache' for the joins preselect cache of the current namespace
+	// This value determines how many requests are required to put results into the cache
+	// For example, with a value of 2: the first request will be executed without caching, the second request will generate a cache entry and put the results into the cache, and the third request will get the cached results
+	// This value may be automatically increased if the cache is invalidated too fast
+	// Default value is 2. Min value is 0
+	JoinHitsToCache uint32 `json:"joins_preselect_hit_to_cache"`
+	// Max size of the cache for the COUNT_CACHED() aggregation in bytes for each namespace
+	// This cache stores resulting COUNTs and serialized queries for the COUNT_CACHED() aggregations
+	// Default value is 134217728 (128 MB). Min value is 0
+	QueryCountCacheSize uint64 `json:"query_count_cache_size"`
+	// Default 'hits to cache' for the COUNT_CACHED() aggregation of the current namespace
+	// This value determines how many requests are required to put results into the cache
+	// For example, with a value of 2: the first request will be executed without caching, the second request will generate a cache entry and put the results into the cache, and the third request will get the cached results. This value may be automatically increased if the cache is invalidated too fast
+	// Default value is 2. Min value is 0
+	QueryCountHitsToCache uint32 `json:"query_count_hit_to_cache"`
+}
+
 // DBNamespacesConfig is part of reindexer configuration contains namespaces options
 type DBNamespacesConfig struct {
 	// Name of namespace, or `*` for setting to all namespaces
@@ -420,6 +464,8 @@ type DBNamespacesConfig struct {
 	// 0 - disables synchronous storage flush. In this case storage will be flushed in background thread only
 	// Default value is 20000
 	SyncStorageFlushLimit int `json:"sync_storage_flush_limit"`
+	// Namespaces' cache configs
+	CacheConfig *NamespaceCacheConfig `json:"cache,omitempty"`
 }
 
 // DBReplicationConfig is part of reindexer configuration contains replication options
diff --git a/dsl/dsl.go b/dsl/dsl.go
index 8d6cd5e49..0b9509219 100644
--- a/dsl/dsl.go
+++ b/dsl/dsl.go
@@ -305,7 +305,10 @@ func (f *Filter) parseValue(data string) error {
 		f.Value, err = f.parseValuesArray(rawValues)
 		return err
 	case "any", "empty":
-		f.Value = 0
+		if len(data) != 0 && data != `""` && data != "null" {
+			return fmt.Errorf("filter expects no arguments or null for '%s' condition", f.Cond)
+		}
+		f.Value = nil
 	default:
 		return fmt.Errorf("cond type '%s' not found", f.Cond)
 	}
diff --git a/readme.md b/readme.md
index abcd4f415..c4a73ae30 100644
--- a/readme.md
+++ b/readme.md
@@ -151,7 +151,7 @@ import (
 	// choose how the Reindexer binds to the app (in this case "builtin," which means link Reindexer as a static library)
 	_ "github.com/restream/reindexer/v3/bindings/builtin"
 
-	// OR use Reindexer as standalone server and connect to it via TCP.
+	// OR use Reindexer as standalone server and connect to it via TCP or unix domain socket (if available).
 	// _ "github.com/restream/reindexer/v3/bindings/cproto"
 
 	// OR link Reindexer as static library with bundled server.
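As a usage illustration for the NamespaceCacheConfig block added to describer.go above: the new cache limits are attached per namespace through the CacheConfig field of DBNamespacesConfig. The sketch below is a minimal, non-authoritative example; it assumes the usual pattern of upserting a DBConfigItem with type "namespaces" into the "#config" system namespace, and the "items" namespace name and the Namespace field name are assumptions for illustration only. Sizes are in bytes and mirror the documented defaults.

    package main

    import "github.com/restream/reindexer/v3"

    // applyNamespaceCacheConfig sketches how the new CacheConfig field might be filled in.
    // The DBConfigItem wrapper, the "namespaces" type string and the "#config" namespace
    // are assumptions about the surrounding configuration API, not part of this diff.
    func applyNamespaceCacheConfig(db *reindexer.Reindexer) error {
    	nsCfg := reindexer.DBNamespacesConfig{
    		Namespace: "items", // hypothetical namespace; "*" targets all namespaces
    		CacheConfig: &reindexer.NamespaceCacheConfig{
    			IdxIdsetCacheSize:     134217728, // 128 MB (documented default)
    			IdxIdsetHitsToCache:   2,
    			FTIdxCacheSize:        134217728,
    			FTIdxHitsToCache:      2,
    			JoinCacheSize:         268435456, // 256 MB (documented default)
    			JoinHitsToCache:       2,
    			QueryCountCacheSize:   134217728,
    			QueryCountHitsToCache: 2,
    		},
    	}
    	item := reindexer.DBConfigItem{
    		Type:       "namespaces",
    		Namespaces: &[]reindexer.DBNamespacesConfig{nsCfg},
    	}
    	return db.Upsert("#config", &item)
    }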
@@ -172,11 +172,17 @@ func main() { // Init a database instance and choose the binding (builtin) db := reindexer.NewReindex("builtin:///tmp/reindex/testdb") - // OR - Init a database instance and choose the binding (connect to server) + // OR - Init a database instance and choose the binding (connect to server via TCP sockets) // Database should be created explicitly via reindexer_tool or via WithCreateDBIfMissing option: // If server security mode is enabled, then username and password are mandatory // db := reindexer.NewReindex("cproto://user:pass@127.0.0.1:6534/testdb", reindexer.WithCreateDBIfMissing()) + // OR - Init a database instance and choose the binding (connect to server via unix domain sockets) + // Unix domain sockets are available on the unix systems only (socket file has to be explicitly set on the server's side with '--urpcaddr' option) + // Database should be created explicitly via reindexer_tool or via WithCreateDBIfMissing option: + // If server security mode is enabled, then username and password are mandatory + // db := reindexer.NewReindex("ucproto://user:pass@/tmp/reindexer.socket:/testdb", reindexer.WithCreateDBIfMissing()) + // OR - Init a database instance and choose the binding (builtin, with bundled server) // serverConfig := config.DefaultServerConfig () // If server security mode is enabled, then username and password are mandatory @@ -284,8 +290,8 @@ Reindexer can run in 3 different modes: - `embedded (builtin)` Reindexer is embedded into application as static library, and does not reuqire separate server proccess. - `embedded with server (builtinserver)` Reindexer is embedded into application as static library, and start server. In this mode other - clients can connect to application via cproto or http. -- `standalone` Reindexer run as standalone server, application connects to Reindexer via network + clients can connect to application via cproto, ucproto or http. +- `standalone` Reindexer run as standalone server, application connects to Reindexer via network or unix domain sockets. 
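To make the unix-domain-socket path concrete, here is a compact Go sketch in the spirit of the builtinserver test further below: the embedded server exposes an RPC socket file via UnixRPCAddr, and the same (or another) process connects to it with a ucproto DSN. The socket path, storage path and database name are illustrative only.

    package main

    import (
    	"time"

    	"github.com/restream/reindexer/v3"
    	_ "github.com/restream/reindexer/v3/bindings/builtinserver"
    	"github.com/restream/reindexer/v3/bindings/builtinserver/config"
    	_ "github.com/restream/reindexer/v3/bindings/cproto" // registers the cproto/ucproto bindings
    )

    func main() {
    	// Embedded server exposing an RPC unix socket (paths and names are illustrative).
    	cfg := config.DefaultServerConfig()
    	cfg.Net.RPCAddr = "0.0.0.0:6534"
    	cfg.Net.UnixRPCAddr = "/tmp/reindexer_example.sock"
    	cfg.Storage.Path = "/tmp/reindex_example_storage"

    	embedded := reindexer.NewReindex("builtinserver://exampledb",
    		reindexer.WithServerConfig(time.Second*100, cfg))
    	defer embedded.Close()
    	if err := embedded.Status().Err; err != nil {
    		panic(err)
    	}

    	// Client side: connect to the same database over the unix socket.
    	// DSN format: ucproto://<path-to-socket-file>:/<dbname>
    	client := reindexer.NewReindex("ucproto:///tmp/reindexer_example.sock:/exampledb")
    	defer client.Close()
    	if err := client.Status().Err; err != nil {
    		panic(err)
    	}
    }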
### Installation for server mode diff --git a/reindexer_impl.go b/reindexer_impl.go index c1afe3fb7..b545e2c92 100644 --- a/reindexer_impl.go +++ b/reindexer_impl.go @@ -816,7 +816,7 @@ func (db *reindexerImpl) addFilterDSL(filter *dsl.Filter, q *Query) error { if err != nil { return err } - if filter.Value != nil { + if filter.Value != nil || cond == bindings.ANY || cond == bindings.EMPTY { // Skip filter if value is nil (for backwards compatibility reasons) q.Where(filter.Field, cond, filter.Value) } else { diff --git a/test/builtinserver_test.go b/test/builtinserver_test.go index 088b7e5c9..978e23c6d 100644 --- a/test/builtinserver_test.go +++ b/test/builtinserver_test.go @@ -23,6 +23,7 @@ func TestBuiltinServer(t *testing.T) { os.RemoveAll(cfg1.Storage.Path) rx1 := reindexer.NewReindex("builtinserver://xxx", reindexer.WithServerConfig(time.Second*100, cfg1)) + defer rx1.Close() assert.NoError(t, rx1.Status().Err) assert.NoError(t, rx1.OpenNamespace("testns", reindexer.DefaultNamespaceOptions(), &ScvTestItem{})) @@ -33,15 +34,29 @@ func TestBuiltinServer(t *testing.T) { os.RemoveAll(cfg2.Storage.Path) rx2 := reindexer.NewReindex("builtinserver://xxx", reindexer.WithServerConfig(time.Second*100, cfg2)) + defer rx2.Close() assert.NoError(t, rx2.Status().Err) assert.NoError(t, rx2.OpenNamespace("testns", reindexer.DefaultNamespaceOptions(), &ScvTestItem{})) rx3 := reindexer.NewReindex("cproto://127.0.0.1:26535/xxx") + defer rx3.Close() assert.NoError(t, rx3.Status().Err) assert.NoError(t, rx3.OpenNamespace("testns", reindexer.DefaultNamespaceOptions(), &ScvTestItem{})) - rx3.Close() - rx2.Close() - rx1.Close() - + cfg4 := config.DefaultServerConfig() + cfg4.Net.HTTPAddr = "0:29090" + cfg4.Net.RPCAddr = "0:26536" + cfg4.Storage.Path = "/tmp/rx_builtinserver_test4" + cfg4.Net.UnixRPCAddr = "/tmp/reindexer_builtinserver_test.sock" + + os.RemoveAll(cfg4.Storage.Path) + rx4 := reindexer.NewReindex("builtinserver://xxx", reindexer.WithServerConfig(time.Second*100, cfg4)) + defer rx4.Close() + assert.NoError(t, rx4.Status().Err) + assert.NoError(t, rx4.OpenNamespace("testns", reindexer.DefaultNamespaceOptions(), &ScvTestItem{})) + + rx5 := reindexer.NewReindex("ucproto:///tmp/reindexer_builtinserver_test.sock:/xxx") + defer rx5.Close() + assert.NoError(t, rx5.Status().Err) + assert.NoError(t, rx5.OpenNamespace("testns", reindexer.DefaultNamespaceOptions(), &ScvTestItem{})) } diff --git a/test/multiple_json_paths_test.go b/test/multiple_json_paths_test.go new file mode 100644 index 000000000..886bd147a --- /dev/null +++ b/test/multiple_json_paths_test.go @@ -0,0 +1,532 @@ +package reindexer + +import ( + "math" + "math/rand" + "sort" + "strconv" + "testing" + + "github.com/restream/reindexer/v3" + "github.com/stretchr/testify/require" +) + +type TestItemAppendable struct { + ID int `json:"id" reindex:"id,,pk"` + Field1 int `json:"field1,omitempty" reindex:"idx,,appendable"` + Field2 int `json:"field2,omitempty" reindex:"idx,,appendable"` +} + +type TestArrItemAppendable struct { + ID int `json:"id" reindex:"id,,pk"` + ArrField1 []int `json:"arrfield1,omitempty" reindex:"arridx,,appendable"` + ArrField2 []int `json:"arrfield2,omitempty" reindex:"arridx,,appendable"` +} + +type TestJoinItemAppendable struct { + ID int `json:"id" reindex:"id,,pk"` + Field1 int `json:"field1,omitempty" reindex:"idx,,appendable"` + Field2 int `json:"field2,omitempty" reindex:"idx,,appendable"` + TestItemJoined []*TestItemAppendable `reindex:"test_joined,,joined"` +} + +type TestJoinArrItemAppendable struct { + ID int 
`json:"id" reindex:"id,,pk"` + ArrField1 []int `json:"arrfield1,omitempty" reindex:"arridx,,appendable"` + ArrField2 []int `json:"arrfield2,omitempty" reindex:"arridx,,appendable"` + TestArrItemJoined []*TestArrItemAppendable `reindex:"test_arr_joined,,joined"` +} + +type TestItemNestedAppendable struct { + ID int `json:"id" reindex:"id,,pk"` + NField int `json:"nfield,omitempty" reindex:"idx,,appendable"` + TestNested1 *TestItemNestedAppendableN1 `json:"test_nested_1,omitempty"` + TestNested2 *TestItemNestedAppendableN2 `json:"test_nested_2,omitempty"` +} + +type TestItemNestedAppendableN1 struct { + ID int `json:"id"` + NField int `json:"nfield,omitempty" reindex:"idx,,appendable"` +} + +type TestItemNestedAppendableN2 struct { + ID int `json:"id"` + NField int `json:"nfield,omitempty" reindex:"idx,,appendable"` +} + +type TestItemNestedArrAppendable struct { + ID int `json:"id" reindex:"id,,pk"` + NFieldArr []int `json:"nfield_arr,omitempty" reindex:"arridx,,appendable"` + TestNested1 *TestItemNestedArrAppendableN1 `json:"test_nested_1,omitempty"` + TestNested2 *TestItemNestedArrAppendableN2 `json:"test_nested_2,omitempty"` +} + +type TestItemNestedArrAppendableN1 struct { + ID int `json:"id"` + NFieldArr []int `json:"nfield_arr,omitempty" reindex:"arridx,,appendable"` +} + +type TestItemNestedArrAppendableN2 struct { + ID int `json:"id"` + NFieldArr []int `json:"nfield_arr,omitempty" reindex:"arridx,,appendable"` +} + +const TestSelectWithMultipleJsonPathsNs = "test_select_with_multiple_json_paths" +const TestSelectArrWithMultipleJsonPathsNs = "test_select_arr_with_multiple_json_paths" + +const TestJoinWithMultipleJsonPathsNs = "test_join_with_multiple_json_paths" +const TestJoinedWithMultipleJsonPathsNs = "test_joined_with_multiple_json_paths" +const TestJoinArrWithMultipleJsonPathsNs = "test_join_arr_with_multiple_json_paths" +const TestJoinedArrWithMultipleJsonPathsNs = "test_joined_arr_with_multiple_json_paths" + +const TestAggregWithMultipleJsonPathsNs = "test_aggreg_with_multiple_json_paths" +const TestAggregArrWithMultipleJsonPathsNs = "test_aggreg_arr_with_multiple_json_paths" + +const TestNestedWithMultipleJsonPathsNs = "test_nested_with_multiple_json_paths" +const TestNestedArrWithMultipleJsonPathsNs = "test_nested_arr_with_multiple_json_paths" + +func init() { + tnamespaces[TestSelectWithMultipleJsonPathsNs] = TestItemAppendable{} + tnamespaces[TestSelectArrWithMultipleJsonPathsNs] = TestArrItemAppendable{} + + tnamespaces[TestJoinWithMultipleJsonPathsNs] = TestJoinItemAppendable{} + tnamespaces[TestJoinedWithMultipleJsonPathsNs] = TestItemAppendable{} + tnamespaces[TestJoinArrWithMultipleJsonPathsNs] = TestJoinArrItemAppendable{} + tnamespaces[TestJoinedArrWithMultipleJsonPathsNs] = TestArrItemAppendable{} + + tnamespaces[TestAggregWithMultipleJsonPathsNs] = TestItemAppendable{} + tnamespaces[TestAggregArrWithMultipleJsonPathsNs] = TestArrItemAppendable{} + + tnamespaces[TestNestedWithMultipleJsonPathsNs] = TestItemNestedAppendable{} + tnamespaces[TestNestedArrWithMultipleJsonPathsNs] = TestItemNestedArrAppendable{} +} + +func checkResultItem(t *testing.T, it *reindexer.Iterator, item interface{}) { + defer it.Close() + require.Equal(t, 1, it.Count()) + for it.Next() { + require.EqualValues(t, item, it.Object()) + } +} + +func TestSelectWithMultipleJsonPaths(t *testing.T) { + t.Parallel() + + const ns = TestSelectWithMultipleJsonPathsNs + const ns2 = TestSelectArrWithMultipleJsonPathsNs + + testItem1 := TestItemAppendable{ID: 1, Field1: 10} + testItem2 := 
TestItemAppendable{ID: 2, Field2: 20} + testItem3 := TestItemAppendable{ID: 3, Field1: 30} + testItem4 := TestItemAppendable{ID: 4, Field2: 30} + + for _, item := range []TestItemAppendable{testItem1, testItem2, testItem3, testItem4} { + err := DB.Upsert(ns, item) + require.NoError(t, err) + } + + t.Run("test select with index multiple json paths", func(t *testing.T) { + it1 := DBD.Query(ns).Where("idx", reindexer.EQ, testItem1.Field1).MustExec() + checkResultItem(t, it1, &testItem1) + + it2 := DBD.Query(ns).Where("idx", reindexer.EQ, testItem2.Field2).MustExec() + checkResultItem(t, it2, &testItem2) + + items, err := DBD.Query(ns). + Where("idx", reindexer.EQ, testItem3.Field1).MustExec().FetchAll() + require.NoError(t, err) + expectedItems := []TestItemAppendable{testItem3, testItem4} + for i, v := range items { + require.EqualValues(t, &expectedItems[i], v.(*TestItemAppendable)) + } + }) + + testArrItem1 := TestArrItemAppendable{ID: 5, ArrField1: []int{50, 51}} + testArrItem2 := TestArrItemAppendable{ID: 6, ArrField2: []int{60, 61}} + testArrItem3 := TestArrItemAppendable{ID: 7, ArrField1: []int{70, 71}, ArrField2: []int{72, 73}} + + for _, item := range []TestArrItemAppendable{testArrItem1, testArrItem2, testArrItem3} { + err := DB.Upsert(ns2, item) + require.NoError(t, err) + } + + t.Run("test select with array index multiple json paths", func(t *testing.T) { + it1 := DBD.Query(ns2).Where("arridx", reindexer.EQ, testArrItem1.ArrField1[0]).MustExec() + checkResultItem(t, it1, &testArrItem1) + + it2 := DBD.Query(ns2).Where("arridx", reindexer.EQ, testArrItem2.ArrField2[0]).MustExec() + checkResultItem(t, it2, &testArrItem2) + + it3 := DBD.Query(ns2).Where("arridx", reindexer.EQ, testArrItem3.ArrField1[0]).MustExec() + checkResultItem(t, it3, &testArrItem3) + + it4 := DBD.Query(ns2).Where("arridx", reindexer.EQ, testArrItem3.ArrField2[0]).MustExec() + checkResultItem(t, it4, &testArrItem3) + }) + + t.Run("can't sort with appendable tag", func(t *testing.T) { + _, err := DBD.Query(ns).Sort("idx", false).Exec().FetchAll() + require.ErrorContains(t, err, "Sorting cannot be applied to array field.") + + _, err = DBD.Query(ns2).Sort("arridx", false).Exec().FetchAll() + require.ErrorContains(t, err, "Sorting cannot be applied to array field.") + }) +} + +func TestJoinWithMultipleJsonPaths(t *testing.T) { + t.Parallel() + + const ns = TestJoinWithMultipleJsonPathsNs + const nsj = TestJoinedWithMultipleJsonPathsNs + + testItem11 := TestJoinItemAppendable{ID: 1, Field1: 10} + testItem12 := TestJoinItemAppendable{ID: 2, Field2: 20} + testItem13 := TestJoinItemAppendable{ID: 3, Field1: 30} + for _, item := range []TestJoinItemAppendable{testItem11, testItem12, testItem13} { + err := DB.Upsert(ns, item) + require.NoError(t, err) + } + + testItem21 := TestItemAppendable{ID: 1, Field1: 10} + testItem22 := TestItemAppendable{ID: 2, Field2: 20} + testItem23 := TestItemAppendable{ID: 3, Field2: 30} + for _, item := range []TestItemAppendable{testItem21, testItem22, testItem23} { + err := DB.Upsert(nsj, item) + require.NoError(t, err) + } + + testItem11.TestItemJoined = []*TestItemAppendable{&testItem21} + testItem12.TestItemJoined = []*TestItemAppendable{&testItem22} + testItem13.TestItemJoined = []*TestItemAppendable{&testItem23} + + expectedItems1 := []TestJoinItemAppendable{testItem11, testItem12, testItem13} + + t.Run("test inner join with index multiple json paths", func(t *testing.T) { + qjoin := DBD.Query(nsj) + items, err := DBD.Query(ns).InnerJoin(qjoin, "test_joined"). 
+ On("idx", reindexer.EQ, "idx").MustExec().FetchAll() + require.NoError(t, err) + + for i, v := range items { + require.EqualValues(t, &expectedItems1[i], v.(*TestJoinItemAppendable)) + } + }) + + t.Run("test left join with index multiple json paths", func(t *testing.T) { + qjoin := DBD.Query(nsj) + items, err := DBD.Query(ns).LeftJoin(qjoin, "test_joined"). + On("idx", reindexer.EQ, "idx").MustExec().FetchAll() + require.NoError(t, err) + + for i, v := range items { + require.EqualValues(t, &expectedItems1[i], v.(*TestJoinItemAppendable)) + } + }) + + const ns2 = TestJoinArrWithMultipleJsonPathsNs + const ns2j = TestJoinedArrWithMultipleJsonPathsNs + + testArrItem11 := TestJoinArrItemAppendable{ID: 1, ArrField1: []int{10, 11}} + testArrItem12 := TestJoinArrItemAppendable{ID: 2, ArrField2: []int{20, 21, 22}} + testArrItem13 := TestJoinArrItemAppendable{ID: 3, ArrField1: []int{30, 31, 32}, ArrField2: []int{32, 33}} + for _, item := range []TestJoinArrItemAppendable{testArrItem11, testArrItem12, testArrItem13} { + err := DB.Upsert(ns2, item) + require.NoError(t, err) + } + + testArrItem21 := TestArrItemAppendable{ID: 1, ArrField1: []int{10, 11, 12}} + testArrItem22 := TestArrItemAppendable{ID: 2, ArrField2: []int{20, 21}} + testArrItem23 := TestArrItemAppendable{ID: 3, ArrField2: []int{30, 31}, ArrField1: []int{32, 33, 34}} + for _, item := range []TestArrItemAppendable{testArrItem21, testArrItem22, testArrItem23} { + err := DB.Upsert(ns2j, item) + require.NoError(t, err) + } + + testArrItem11.TestArrItemJoined = []*TestArrItemAppendable{&testArrItem21} + testArrItem12.TestArrItemJoined = []*TestArrItemAppendable{&testArrItem22} + testArrItem13.TestArrItemJoined = []*TestArrItemAppendable{&testArrItem23} + + expectedItems2 := []TestJoinArrItemAppendable{testArrItem11, testArrItem12, testArrItem13} + + t.Run("test inner join with array index multiple json paths", func(t *testing.T) { + qjoin := DBD.Query(ns2j) + items, err := DBD.Query(ns2).InnerJoin(qjoin, "test_arr_joined"). + On("arridx", reindexer.EQ, "arridx").MustExec().FetchAll() + require.NoError(t, err) + + for i, v := range items { + require.EqualValues(t, &expectedItems2[i], v.(*TestJoinArrItemAppendable)) + } + }) + + t.Run("test left join with array index multiple json paths", func(t *testing.T) { + qjoin := DBD.Query(ns2j) + items, err := DBD.Query(ns2).LeftJoin(qjoin, "test_arr_joined"). 
+ On("arridx", reindexer.EQ, "arridx").MustExec().FetchAll() + require.NoError(t, err) + + for i, v := range items { + require.EqualValues(t, &expectedItems2[i], v.(*TestJoinArrItemAppendable)) + } + }) +} + +type aggValuesStruct struct { + Min float64 `json:"min"` + Max float64 `json:"max"` + Sum float64 `json:"sum"` + Avg float64 `json:"avg"` +} + +func addValuesFromArrToMap(m map[string]int, arrField []int) { + for _, arr := range arrField { + m[strconv.Itoa(arr)] += 1 + } +} + +func TestAggregationsWithMultipleJsonPaths(t *testing.T) { + t.Parallel() + + const ns = TestAggregWithMultipleJsonPathsNs + const ns2 = TestAggregArrWithMultipleJsonPathsNs + + start, end := 0, 10 + minValue, maxRandValue := 1, 20 + + aggValues := aggValuesStruct{ + Min: float64(math.MaxFloat64), Max: float64(0), Sum: 0, Avg: 0, + } + + fieldsSet := make(map[string]int) + for i := start; i < end; i++ { + item1 := TestItemAppendable{ID: i, Field1: minValue + rand.Intn(maxRandValue)} + err := DB.Upsert(ns, item1) + require.NoError(t, err) + value1 := strconv.Itoa(item1.Field1) + fieldsSet[value1] += 1 + item2 := TestItemAppendable{ID: i + 10, Field2: minValue + rand.Intn(maxRandValue)} + err = DB.Upsert(ns, item2) + require.NoError(t, err) + value2 := strconv.Itoa(item2.Field2) + fieldsSet[value2] += 1 + + val1, val2 := float64(item1.Field1), float64(item2.Field2) + if aggValues.Min > val1 || aggValues.Min > val2 { + aggValues.Min = float64(min(item1.Field1, item2.Field2)) + } + if aggValues.Max < val1 || aggValues.Max < val2 { + aggValues.Max = float64(max(item1.Field1, item2.Field2)) + } + aggValues.Sum += float64(item1.Field1 + item2.Field2) + } + aggValues.Avg = aggValues.Sum / float64(end*2) + + fieldValues := []string{} + for k := range fieldsSet { + fieldValues = append(fieldValues, k) + } + sort.Strings(fieldValues) + + t.Run("test aggreations with index multiple json paths", func(t *testing.T) { + q := DBD.Query(ns) + q.AggregateMin("idx") + q.AggregateMax("idx") + q.AggregateSum("idx") + q.AggregateAvg("idx") + + it := q.MustExec() + require.NoError(t, it.Error()) + defer it.Close() + + aggResults := it.AggResults() + require.Equal(t, 4, len(aggResults)) + require.Equal(t, aggValues.Min, *aggResults[0].Value) + require.Equal(t, aggValues.Max, *aggResults[1].Value) + require.Equal(t, aggValues.Sum, *aggResults[2].Value) + require.Equal(t, aggValues.Avg, *aggResults[3].Value) + }) + + t.Run("test distinct with index multiple json paths", func(t *testing.T) { + it := DBD.Query(ns).Distinct("idx").MustExec() + require.NoError(t, it.Error()) + defer it.Close() + require.Equal(t, len(fieldValues), it.Count()) + + aggResults := it.AggResults() + require.Equal(t, 1, len(aggResults)) + sort.Strings(aggResults[0].Distincts) + require.Equal(t, fieldValues, aggResults[0].Distincts) + }) + + t.Run("test facet with index multiple json paths", func(t *testing.T) { + q := DBD.Query(ns) + q.AggregateFacet("idx") + it := q.MustExec() + require.NoError(t, it.Error()) + defer it.Close() + require.Equal(t, 0, it.Count()) + + aggResults := it.AggResults() + require.Equal(t, 1, len(aggResults)) + for _, facet := range aggResults[0].Facets { + require.Equal(t, fieldsSet[facet.Values[0]], facet.Count) + } + }) + + aggArrValues := aggValuesStruct{ + Min: float64(math.MaxFloat64), Max: float64(0), Sum: 0, Avg: 0, + } + + intValues := []int{} + fieldsArrSet := make(map[string]int) + for i := start; i < end; i++ { + item1 := TestArrItemAppendable{ID: i, ArrField1: randIntArr(2, minValue, maxRandValue)} + err := DB.Upsert(ns2, 
item1) + require.NoError(t, err) + addValuesFromArrToMap(fieldsArrSet, item1.ArrField1) + item2 := TestArrItemAppendable{ID: i + 10, ArrField2: randIntArr(2, minValue, maxRandValue)} + err = DB.Upsert(ns2, item2) + require.NoError(t, err) + addValuesFromArrToMap(fieldsArrSet, item2.ArrField2) + item3 := TestArrItemAppendable{ID: i + 20, ArrField1: randIntArr(2, minValue, maxRandValue), + ArrField2: randIntArr(2, 0, minValue)} + err = DB.Upsert(ns2, item3) + require.NoError(t, err) + addValuesFromArrToMap(fieldsArrSet, item3.ArrField1) + addValuesFromArrToMap(fieldsArrSet, item3.ArrField2) + + arrFields := [][]int{item1.ArrField1, item2.ArrField2, item3.ArrField1, item3.ArrField2} + for _, intArrValue := range arrFields { + for _, intValue := range intArrValue { + intValues = append(intValues, intValue) + aggArrValues.Sum += float64(intValue) + } + } + } + sort.Ints(intValues) + + aggArrValues.Min = float64(intValues[0]) + aggArrValues.Max = float64(intValues[len(intValues)-1]) + aggArrValues.Avg = aggArrValues.Sum / float64(len(intValues)) + + fieldValues2 := []string{} + for k := range fieldsArrSet { + fieldValues2 = append(fieldValues2, k) + } + sort.Strings(fieldValues2) + + t.Run("test aggreations with array index multiple json paths", func(t *testing.T) { + q := DBD.Query(ns2) + q.AggregateMin("arridx") + q.AggregateMax("arridx") + q.AggregateSum("arridx") + q.AggregateAvg("arridx") + + it := q.MustExec() + require.NoError(t, it.Error()) + defer it.Close() + aggResults := it.AggResults() + require.Equal(t, 4, len(aggResults)) + require.Equal(t, aggArrValues.Min, *aggResults[0].Value) + require.Equal(t, aggArrValues.Max, *aggResults[1].Value) + require.Equal(t, aggArrValues.Sum, *aggResults[2].Value) + require.Equal(t, aggArrValues.Avg, *aggResults[3].Value) + }) + + t.Run("test distinct with array index multiple json paths", func(t *testing.T) { + it := DBD.Query(ns2).Distinct("arridx").MustExec() + require.NoError(t, it.Error()) + defer it.Close() + // require.Equal(t, end*3, it.Count()) TODO: 1526 + + aggResults := it.AggResults() + require.Equal(t, 1, len(aggResults)) + sort.Strings(aggResults[0].Distincts) + require.Equal(t, fieldValues2, aggResults[0].Distincts) + }) + + t.Run("test facet with array index multiple json paths", func(t *testing.T) { + q := DBD.Query(ns2) + q.AggregateFacet("arridx") + it := q.MustExec() + require.NoError(t, it.Error()) + defer it.Close() + require.Equal(t, 0, it.Count()) + + aggResults := it.AggResults() + require.Equal(t, 1, len(aggResults)) + for _, facet := range aggResults[0].Facets { + require.Equal(t, fieldsArrSet[facet.Values[0]], facet.Count) + } + }) +} + +func TestNestedWithMultipleJsonPaths(t *testing.T) { + t.Parallel() + + const ns = TestNestedWithMultipleJsonPathsNs + const ns2 = TestNestedArrWithMultipleJsonPathsNs + + testItem1 := TestItemNestedAppendable{ID: 1, NField: 10, + TestNested1: &TestItemNestedAppendableN1{ID: 1, NField: 30}, + TestNested2: &TestItemNestedAppendableN2{ID: 1, NField: 10}} + testItem2 := TestItemNestedAppendable{ID: 2, NField: 20, + TestNested1: &TestItemNestedAppendableN1{ID: 2, NField: 21}, + TestNested2: &TestItemNestedAppendableN2{ID: 2, NField: 30}} + testItem3 := TestItemNestedAppendable{ID: 3, NField: 30, + TestNested1: &TestItemNestedAppendableN1{ID: 3, NField: 31}, + TestNested2: &TestItemNestedAppendableN2{ID: 3, NField: 30}} + + for _, item := range []TestItemNestedAppendable{testItem1, testItem2, testItem3} { + err := DB.Upsert(ns, item) + require.NoError(t, err) + } + + t.Run("test select with 
nested index multiple json paths", func(t *testing.T) { + it1 := DBD.Query(ns).Where("idx", reindexer.EQ, testItem1.NField).MustExec() + checkResultItem(t, it1, &testItem1) + + it2 := DBD.Query(ns).Where("idx", reindexer.EQ, testItem2.TestNested1.NField).MustExec() + checkResultItem(t, it2, &testItem2) + + items, err := DBD.Query(ns). + Where("idx", reindexer.EQ, testItem3.NField).MustExec().FetchAll() + require.NoError(t, err) + expectedItems := []TestItemNestedAppendable{testItem1, testItem2, testItem3} + for i, v := range items { + require.EqualValues(t, &expectedItems[i], v.(*TestItemNestedAppendable)) + } + }) + + testArrItem1 := TestItemNestedArrAppendable{ID: 1, NFieldArr: []int{10, 11}, + TestNested1: &TestItemNestedArrAppendableN1{ID: 1, NFieldArr: []int{30, 11}}, + TestNested2: &TestItemNestedArrAppendableN2{ID: 1, NFieldArr: []int{10, 11}}} + testArrItem2 := TestItemNestedArrAppendable{ID: 2, NFieldArr: []int{20, 11}, + TestNested1: &TestItemNestedArrAppendableN1{ID: 1, NFieldArr: []int{21, 11}}, + TestNested2: &TestItemNestedArrAppendableN2{ID: 1, NFieldArr: []int{31, 30}}} + testArrItem3 := TestItemNestedArrAppendable{ID: 3, NFieldArr: []int{30, 11}, + TestNested1: &TestItemNestedArrAppendableN1{ID: 1, NFieldArr: []int{31, 11}}, + TestNested2: &TestItemNestedArrAppendableN2{ID: 1, NFieldArr: []int{30, 11}}} + + for _, item := range []TestItemNestedArrAppendable{testArrItem1, testArrItem2, testArrItem3} { + err := DB.Upsert(ns2, item) + require.NoError(t, err) + } + + t.Run("test select with nested array index multiple json paths", func(t *testing.T) { + it1 := DBD.Query(ns2).Where("arridx", reindexer.EQ, testArrItem1.NFieldArr[0]).MustExec() + checkResultItem(t, it1, &testArrItem1) + + it2 := DBD.Query(ns2).Where("arridx", reindexer.EQ, testArrItem2.TestNested1.NFieldArr[0]).MustExec() + checkResultItem(t, it2, &testArrItem2) + + items, err := DBD.Query(ns2). 
+ Where("arridx", reindexer.EQ, testArrItem3.NFieldArr[0]).MustExec().FetchAll() + require.NoError(t, err) + expectedItems := []TestItemNestedArrAppendable{testArrItem1, testArrItem2, testArrItem3} + for i, v := range items { + require.EqualValues(t, &expectedItems[i], v.(*TestItemNestedArrAppendable)) + } + }) + +} diff --git a/test/queries_test.go b/test/queries_test.go index c89451d7f..440217696 100644 --- a/test/queries_test.go +++ b/test/queries_test.go @@ -113,23 +113,23 @@ type TestItemWithSparse struct { Prices []*TestJoinItem `reindex:"prices,,joined"` Pricesx []*TestJoinItem `reindex:"pricesx,,joined"` ID int `reindex:"id,-"` - Genre int64 `reindex:"genre,tree,sparse"` + Genre int64 `reindex:"genre,tree"` Year int `reindex:"year,tree,sparse"` Packages []int `reindex:"packages,hash,sparse"` Name string `reindex:"name,tree,sparse"` Countries []string `reindex:"countries,tree,sparse"` - Age int `reindex:"age,hash,sparse"` + Age int `reindex:"age,hash"` AgeLimit int64 `json:"age_limit" reindex:"age_limit,hash,sparse"` CompanyName string `json:"company_name" reindex:"company_name,hash,sparse"` Address string `json:"address"` PostalCode int `json:"postal_code"` Description string `reindex:"description,fuzzytext"` - Rate float64 `reindex:"rate,tree,sparse"` + Rate float64 `reindex:"rate,tree"` ExchangeRate float64 `json:"exchange_rate"` PollutionRate float32 `json:"pollution_rate"` IsDeleted bool `reindex:"isdeleted,-"` Actor Actor `reindex:"actor"` - PricesIDs []int `reindex:"price_id"` + PricesIDs []int `reindex:"price_id,,sparse"` LocationID string `reindex:"location"` EndTime int `reindex:"end_time,-"` StartTime int `reindex:"start_time,tree"` @@ -636,7 +636,7 @@ func TestWALQueries(t *testing.T) { }) t.Run("JSON WAL query with ANY", func(t *testing.T) { - jsonIt := DBD.Query(ns).Where("#lsn", reindexer.ANY, 0).ExecToJson() + jsonIt := DBD.Query(ns).Where("#lsn", reindexer.ANY, nil).ExecToJson() validateJson(t, jsonIt) }) @@ -646,7 +646,7 @@ func TestWALQueries(t *testing.T) { }) t.Run("CJSON WAL query with ANY (expecting error)", func(t *testing.T) { - it := DBD.Query(ns).Where("#lsn", reindexer.ANY, 0).Exec() + it := DBD.Query(ns).Where("#lsn", reindexer.ANY, nil).Exec() assert.Error(t, it.Error()) }) } @@ -873,8 +873,8 @@ func callQueriesSequence(t *testing.T, namespace string, distinct []string, sort newTestQuery(DB, namespace).Where("name", reindexer.LIKE, makeLikePattern(randString())).ExecAndVerify(t) newTestQuery(DB, namespace).Where("packages", reindexer.SET, randIntArr(10, 10000, 50)).Distinct(distinct).Sort(sort, desc).ExecAndVerify(t) - newTestQuery(DB, namespace).Where("packages", reindexer.EMPTY, 0).Distinct(distinct).Sort(sort, desc).ExecAndVerify(t) - newTestQuery(DB, namespace).Where("packages", reindexer.ANY, 0).Distinct(distinct).Sort(sort, desc).ExecAndVerify(t) + newTestQuery(DB, namespace).Where("packages", reindexer.EMPTY, nil).Distinct(distinct).Sort(sort, desc).ExecAndVerify(t) + newTestQuery(DB, namespace).Where("packages", reindexer.ANY, nil).Distinct(distinct).Sort(sort, desc).ExecAndVerify(t) newTestQuery(DB, namespace).Where("isdeleted", reindexer.EQ, true).Distinct(distinct).Sort(sort, desc).ExecAndVerify(t) @@ -929,12 +929,12 @@ func callQueriesSequence(t *testing.T, namespace string, distinct []string, sort newTestQuery(DB, namespace).Distinct(distinct).Sort(sort, desc).ReqTotal(). Where("genre", reindexer.SET, []int{5, 1, 7}). Where("year", reindexer.LT, 2010).Or().Where("genre", reindexer.EQ, 3). 
- Where("packages", reindexer.SET, randIntArr(5, 10000, 50)).Or().Where("packages", reindexer.EMPTY, 0).Debug(reindexer.TRACE). + Where("packages", reindexer.SET, randIntArr(5, 10000, 50)).Or().Where("packages", reindexer.EMPTY, nil).Debug(reindexer.TRACE). ExecAndVerify(t) newTestQuery(DB, namespace).Distinct(distinct).Sort(sort, desc).ReqTotal(). Where("genre", reindexer.SET, []int{5, 1, 7}). - Where("year", reindexer.LT, 2010).Or().Where("packages", reindexer.ANY, 0). + Where("year", reindexer.LT, 2010).Or().Where("packages", reindexer.ANY, nil). Where("packages", reindexer.SET, randIntArr(5, 10000, 50)).Debug(reindexer.TRACE). ExecAndVerify(t) @@ -1349,12 +1349,12 @@ func CheckTestItemsDSLQueries(t *testing.T) { { Field: "PACKAGES", Cond: "ANY", - Value: 0, + Value: nil, }, { Field: "countries", Cond: "EMPTY", - Value: 0, + Value: nil, }, { Field: "isdeleted", @@ -1381,8 +1381,8 @@ func CheckTestItemsDSLQueries(t *testing.T) { newTestQuery(DB, "test_items"). Where("year", reindexer.GT, 2016). Where("genre", reindexer.SET, []int{1, 2, 3}). - Where("packages", reindexer.ANY, 0). - Where("countries", reindexer.EMPTY, 0). + Where("packages", reindexer.ANY, nil). + Where("countries", reindexer.EMPTY, nil). Where("isdeleted", reindexer.EQ, true). Where("company_name", reindexer.LIKE, likePattern). Sort("year", true). @@ -1700,13 +1700,13 @@ func TestStrictMode(t *testing.T) { t.Run("Strict filtering/sort by folded fields (empty namespace)", func(t *testing.T) { { - itNames := DBD.Query(namespace).Strict(reindexer.QueryStrictModeNames).Where("nested.Name", reindexer.ANY, 0).Sort("nested.Name", false).MustExec() + itNames := DBD.Query(namespace).Strict(reindexer.QueryStrictModeNames).Where("nested.Name", reindexer.ANY, nil).Sort("nested.Name", false).MustExec() assert.Equal(t, itNames.Count(), 0) itNames.Close() - itNone := DBD.Query(namespace).Strict(reindexer.QueryStrictModeNone).Where("nested.Name", reindexer.ANY, 0).Sort("nested.Name", false).MustExec() + itNone := DBD.Query(namespace).Strict(reindexer.QueryStrictModeNone).Where("nested.Name", reindexer.ANY, nil).Sort("nested.Name", false).MustExec() assert.Equal(t, itNone.Count(), 0) itNone.Close() - itIndexes := DBD.Query(namespace).Strict(reindexer.QueryStrictModeIndexes).Where("nested.Name", reindexer.ANY, 0).Sort("nested.Name", false).Exec() + itIndexes := DBD.Query(namespace).Strict(reindexer.QueryStrictModeIndexes).Where("nested.Name", reindexer.ANY, nil).Sort("nested.Name", false).Exec() assert.Error(t, itIndexes.Error()) itIndexes.Close() } @@ -1833,25 +1833,25 @@ func TestStrictMode(t *testing.T) { assert.Error(t, itIndexes1.Error()) itIndexes1.Close() - itNone3 := DBD.Query(namespace).Where("unknown_field", reindexer.EMPTY, 0).Sort("year", true). + itNone3 := DBD.Query(namespace).Where("unknown_field", reindexer.EMPTY, nil).Sort("year", true). Sort("name", false).Strict(reindexer.QueryStrictModeNone).MustExec() assert.Equal(t, itNone3.Count(), itemsCount) itNone3.Close() } { - itNames := DBD.Query(namespace).Where("unknown_field", reindexer.EMPTY, 0).Sort("year", true). + itNames := DBD.Query(namespace).Where("unknown_field", reindexer.EMPTY, nil).Sort("year", true). Sort("name", false).Strict(reindexer.QueryStrictModeNames).Exec() assert.Error(t, itNames.Error()) itNames.Close() - itNone := DBD.Query(namespace).Where("unknown_field", reindexer.EMPTY, 0).Sort("year", true). + itNone := DBD.Query(namespace).Where("unknown_field", reindexer.EMPTY, nil).Sort("year", true). 
Sort("name", false).Strict(reindexer.QueryStrictModeNone).MustExec() itNone.Close() itAll := DBD.Query(namespace).Sort("year", true). Sort("name", false).Strict(reindexer.QueryStrictModeNone).MustExec() assert.Equal(t, itNone.Count(), itAll.Count()) itAll.Close() - itIndexes := DBD.Query(namespace).Where("unknown_field", reindexer.EMPTY, 0).Sort("year", true). + itIndexes := DBD.Query(namespace).Where("unknown_field", reindexer.EMPTY, nil).Sort("year", true). Sort("name", false).Strict(reindexer.QueryStrictModeIndexes).Exec() assert.Error(t, itIndexes.Error()) itIndexes.Close() diff --git a/test/query_test.go b/test/query_test.go index 9b7daea79..924523459 100644 --- a/test/query_test.go +++ b/test/query_test.go @@ -395,12 +395,14 @@ func (qt *queryTest) DeepReplEqual() *queryTest { func (qt *queryTest) where(index string, condition int, keys interface{}) *queryTest { qte := queryTestEntry{index: index, condition: condition, ikeys: keys} - if reflect.TypeOf(keys).Kind() == reflect.Slice || reflect.TypeOf(keys).Kind() == reflect.Array { - for i := 0; i < reflect.ValueOf(keys).Len(); i++ { - qte.keys = append(qte.keys, reflect.ValueOf(keys).Index(i)) + if keys != nil { + if reflect.TypeOf(keys).Kind() == reflect.Slice || reflect.TypeOf(keys).Kind() == reflect.Array { + for i := 0; i < reflect.ValueOf(keys).Len(); i++ { + qte.keys = append(qte.keys, reflect.ValueOf(keys).Index(i)) + } + } else { + qte.keys = append(qte.keys, reflect.ValueOf(keys)) } - } else { - qte.keys = append(qte.keys, reflect.ValueOf(keys)) } qte.fieldIdx, _ = qt.ns.getField(index) qt.entries.addEntry(qte, qt.nextOp) @@ -1343,6 +1345,13 @@ func min(a, b int) int { return b } +func max(a, b int) int { + if a > b { + return a + } + return b +} + func checkResult(cmpRes int, cond int) bool { result := false switch cond { diff --git a/test/reindexer_test.go b/test/reindexer_test.go index 7f6c3f11f..6a1f03cd8 100644 --- a/test/reindexer_test.go +++ b/test/reindexer_test.go @@ -44,6 +44,8 @@ func TestMain(m *testing.M) { os.RemoveAll("/tmp/reindex_test/") } else if udsn.Scheme == "cproto" { opts = []interface{}{reindexer.WithCreateDBIfMissing(), reindexer.WithNetCompression(), reindexer.WithAppName("RxTestInstance")} + } else if udsn.Scheme == "ucproto" { + opts = []interface{}{reindexer.WithCreateDBIfMissing(), reindexer.WithAppName("RxTestInstance")} } DB = NewReindexWrapper(*dsn, opts...)