Skip to content

Commit

Permalink
chore: update jsoncons version to 0.178
Browse files Browse the repository at this point in the history
Signed-off-by: Roman Gershman <[email protected]>
  • Loading branch information
romange committed Jan 8, 2025
1 parent 5adb976 commit 2724eb2
Show file tree
Hide file tree
Showing 4 changed files with 37 additions and 44 deletions.
4 changes: 1 addition & 3 deletions src/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -74,10 +74,8 @@ set(REFLEX "${THIRD_PARTY_LIB_DIR}/reflex/bin/reflex")
add_third_party(
jsoncons
GIT_REPOSITORY https://github.com/dragonflydb/jsoncons
# URL https://github.com/danielaparker/jsoncons/archive/refs/tags/v0.171.1.tar.gz
GIT_TAG Dragonfly
GIT_TAG Dragonfly.178
GIT_SHALLOW 1
# PATCH_COMMAND patch -p1 -i "${CMAKE_SOURCE_DIR}/patches/jsoncons-v0.171.0.patch"
CMAKE_PASS_FLAGS "-DJSONCONS_BUILD_TESTS=OFF -DJSONCONS_HAS_POLYMORPHIC_ALLOCATOR=ON"
LIB "none"
)
Expand Down
7 changes: 7 additions & 0 deletions src/core/compact_object.cc
Original file line number Diff line number Diff line change
Expand Up @@ -878,6 +878,9 @@ void CompactObj::SetJson(JsonType&& j) {
if (taglen_ == JSON_TAG && JsonEnconding() == kEncodingJsonCons) {
DCHECK(u_.json_obj.cons.json_ptr != nullptr); // must be allocated
u_.json_obj.cons.json_ptr->swap(j);
DCHECK(jsoncons::is_trivial_storage(u_.json_obj.cons.json_ptr->storage_kind()) ||
u_.json_obj.cons.json_ptr->get_allocator().resource() == tl.local_mr);

// We do not set bytes_used as this is not needed. Consider the two following cases:
// 1. old json contains 50 bytes. The delta for new one is 50, so the total bytes
// the new json occupies is 100.
Expand All @@ -889,6 +892,10 @@ void CompactObj::SetJson(JsonType&& j) {

SetMeta(JSON_TAG);
u_.json_obj.cons.json_ptr = AllocateMR<JsonType>(std::move(j));

// With trivial storage json_ptr->get_allocator() throws an exception.
DCHECK(jsoncons::is_trivial_storage(u_.json_obj.cons.json_ptr->storage_kind()) ||
u_.json_obj.cons.json_ptr->get_allocator().resource() == tl.local_mr);
u_.json_obj.cons.bytes_used = 0;
}

Expand Down
68 changes: 28 additions & 40 deletions src/server/json_family.cc
Original file line number Diff line number Diff line change
Expand Up @@ -319,9 +319,18 @@ bool JsonAreEquals(const JsonType& lhs, const JsonType& rhs) {
}
}

template <typename Callback>
// Coordinator-thread variant: parse `input` using the process-wide default
// PMR resource (coordinator threads have no shard-local resource).
// Returns std::nullopt when `input` is not valid JSON.
std::optional<JsonType> JsonFromString(std::string_view input) {
  auto* mr = PMR_NS::get_default_resource();
  return dfly::JsonFromString(input, mr);
}

// Shard-thread variant: parse `input` using the shard's memory resource
// (CompactObj::memory_resource()). Returns std::nullopt when `input` is not
// valid JSON.
std::optional<JsonType> ShardJsonFromString(std::string_view input) {
  auto* mr = CompactObj::memory_resource();
  return dfly::JsonFromString(input, mr);
}

OpResult<DbSlice::AddOrFindResult> SetJson(const OpArgs& op_args, string_view key,
Callback parse_json_value) {
string_view json_str) {
auto& db_slice = op_args.GetDbSlice();

auto op_res = db_slice.AddOrFind(op_args.db_cntx, key);
Expand All @@ -331,17 +340,20 @@ OpResult<DbSlice::AddOrFindResult> SetJson(const OpArgs& op_args, string_view ke

op_args.shard->search_indices()->RemoveDoc(key, op_args.db_cntx, res.it->second);

OpResult<JsonType> json_value = parse_json_value();
RETURN_ON_BAD_STATUS(json_value);
std::optional<JsonType> parsed_json = ShardJsonFromString(json_str);
if (!parsed_json) {
VLOG(1) << "got invalid JSON string '" << json_str << "' cannot be saved";
return OpStatus::INVALID_VALUE;
}

if (JsonEnconding() == kEncodingJsonFlat) {
flexbuffers::Builder fbb;
json::FromJsonType(json_value.value(), &fbb);
json::FromJsonType(*parsed_json, &fbb);
fbb.Finish();
const auto& buf = fbb.GetBuffer();
res.it->second.SetJson(buf.data(), buf.size());
} else {
res.it->second.SetJson(*std::move(json_value));
res.it->second.SetJson(std::move(*parsed_json));
}
op_args.shard->search_indices()->AddDoc(key, op_args.db_cntx, res.it->second);
return std::move(res);
Expand Down Expand Up @@ -388,16 +400,6 @@ string JsonTypeToName(const JsonType& val) {
return std::string{};
}

// Use this method on the coordinator thread
std::optional<JsonType> JsonFromString(std::string_view input) {
return dfly::JsonFromString(input, PMR_NS::get_default_resource());
}

// Use this method on the shard thread
std::optional<JsonType> ShardJsonFromString(std::string_view input) {
return dfly::JsonFromString(input, CompactObj::memory_resource());
}

OpResult<JsonType*> GetJson(const OpArgs& op_args, string_view key) {
auto it_res = op_args.GetDbSlice().FindReadOnly(op_args.db_cntx, key, OBJ_JSON);
if (!it_res.ok())
Expand Down Expand Up @@ -953,8 +955,8 @@ OpResult<long> OpDel(const OpArgs& op_args, string_view key, string_view path,

if (json_path.HoldsJsonPath()) {
const json::Path& path = json_path.AsJsonPath();
long deletions = json::MutatePath(
path, [](optional<string_view>, JsonType* val) { return true; }, json_val);
long deletions =
json::MutatePath(path, [](optional<string_view>, JsonType* val) { return true; }, json_val);
return deletions;
}

Expand Down Expand Up @@ -1308,15 +1310,6 @@ auto OpResp(const OpArgs& op_args, string_view key, const WrappedJsonPath& json_
OpResult<bool> OpSet(const OpArgs& op_args, string_view key, string_view path,
const WrappedJsonPath& json_path, std::string_view json_str,
bool is_nx_condition, bool is_xx_condition) {
auto parse_json = [&json_str]() -> OpResult<JsonType> {
std::optional<JsonType> parsed_json = ShardJsonFromString(json_str);
if (!parsed_json) {
VLOG(1) << "got invalid JSON string '" << json_str << "' cannot be saved";
return OpStatus::SYNTAX_ERR;
}
return parsed_json.value();
};

// The whole key should be replaced.
// NOTE: unlike in Redis, we are overriding the value when the path is "$"
// this is regardless of the current key type. In redis if the key exists
Expand All @@ -1335,13 +1328,7 @@ OpResult<bool> OpSet(const OpArgs& op_args, string_view key, string_view path,
}

JsonMemTracker mem_tracker;
// We need to deep copy parsed_json.value() and not use move! The reason is that otherwise
// it's really difficult to properly track memory deltas because even if we move below,
// the deallocation of parsed_json won't happen in the scope of SetJson but in the scope
// of this function. Because of this, the memory tracking will be off. Another solution here,
// is to use absl::Cleanup and dispatch another Find() but that's too complicated because then
// you need to take into account the order of destructors.
OpResult<DbSlice::AddOrFindResult> st = SetJson(op_args, key, std::move(parse_json));
OpResult<DbSlice::AddOrFindResult> st = SetJson(op_args, key, json_str);
RETURN_ON_BAD_STATUS(st);
mem_tracker.SetJsonSize(st->it->second, st->is_new);
return true;
Expand All @@ -1355,18 +1342,19 @@ OpResult<bool> OpSet(const OpArgs& op_args, string_view key, string_view path,
bool path_exists = false;
bool operation_result = false;

OpResult<JsonType> parsed_json = parse_json();
RETURN_ON_BAD_STATUS(parsed_json);
optional<JsonType> parsed_json = ShardJsonFromString(json_str);
if (!parsed_json) {
VLOG(1) << "got invalid JSON string '" << json_str << "' cannot be saved";
return OpStatus::INVALID_VALUE;
}
const JsonType& new_json = parsed_json.value();

auto cb = [&](std::optional<std::string_view>, JsonType* val) -> MutateCallbackResult<> {
path_exists = true;
if (!is_nx_condition) {
operation_result = true;
static_assert(
std::is_same_v<std::allocator_traits<JsonType>::propagate_on_container_copy_assignment,
std::false_type>);
*val = new_json;
*val =
JsonType(new_json, std::pmr::polymorphic_allocator<char>{CompactObj::memory_resource()});
}
return {};
};
Expand Down
2 changes: 1 addition & 1 deletion tests/dragonfly/memory_test.py
Original file line number Diff line number Diff line change
Expand Up @@ -43,7 +43,7 @@ async def test_rss_used_mem_gap(df_factory, type, keys, val_size, elements):
await asyncio.sleep(1) # Wait for another RSS heartbeat update in Dragonfly

cmd = f"DEBUG POPULATE {keys} k {val_size} RAND TYPE {type} ELEMENTS {elements}"
print(f"Running {cmd}")
logging.info(f"Running {cmd}")
await client.execute_command(cmd)

await asyncio.sleep(2) # Wait for another RSS heartbeat update in Dragonfly
Expand Down

0 comments on commit 2724eb2

Please sign in to comment.