add strict mode to conversions + coverage
generall committed Jan 3, 2025
1 parent 362eee7 commit e3ce46e
Showing 2 changed files with 71 additions and 0 deletions.
54 changes: 54 additions & 0 deletions qdrant_client/conversions/conversion.py
@@ -2019,6 +2019,42 @@ def convert_search_matrix_offsets(
ids=[cls.convert_point_id(p_id) for p_id in model.ids],
)

@classmethod
def convert_strict_mode_config(cls, model: grpc.StrictModeConfig) -> rest.StrictModeConfig:
return rest.StrictModeConfig(
enabled=model.enabled if model.HasField("enabled") else None,
max_query_limit=model.max_query_limit if model.HasField("max_query_limit") else None,
max_timeout=model.max_timeout if model.HasField("max_timeout") else None,
unindexed_filtering_retrieve=model.unindexed_filtering_retrieve
if model.HasField("unindexed_filtering_retrieve")
else None,
unindexed_filtering_update=model.unindexed_filtering_update
if model.HasField("unindexed_filtering_update")
else None,
search_max_hnsw_ef=model.search_max_hnsw_ef
if model.HasField("search_max_hnsw_ef")
else None,
search_allow_exact=model.search_allow_exact
if model.HasField("search_allow_exact")
else None,
search_max_oversampling=model.search_max_oversampling
if model.HasField("search_max_oversampling")
else None,
upsert_max_batchsize=model.upsert_max_batchsize
if model.HasField("upsert_max_batchsize")
else None,
max_collection_vector_size_bytes=model.max_collection_vector_size_bytes
if model.HasField("max_collection_vector_size_bytes")
else None,
read_rate_limit=model.read_rate_limit if model.HasField("read_rate_limit") else None,
write_rate_limit=model.write_rate_limit
if model.HasField("write_rate_limit")
else None,
max_collection_payload_size_bytes=model.max_collection_payload_size_bytes
if model.HasField("max_collection_payload_size_bytes")
else None,
)
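
For reference, a minimal usage sketch of the new gRPC-to-REST converter (not part of this commit; it assumes the enclosing class is GrpcToRest and the import paths used elsewhere in qdrant_client):

from qdrant_client import grpc
from qdrant_client.conversions.conversion import GrpcToRest  # assumed import path

# Only a couple of fields are set; the HasField checks above map every
# unset protobuf field to None on the REST model.
grpc_config = grpc.StrictModeConfig(enabled=True, max_query_limit=100)
rest_config = GrpcToRest.convert_strict_mode_config(grpc_config)

assert rest_config.enabled is True
assert rest_config.max_query_limit == 100
assert rest_config.max_timeout is None  # never set on the protobuf message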


# ----------------------------------------
#
@@ -3932,3 +3968,21 @@ def convert_search_matrix_offsets(
scores=list(model.scores),
ids=[cls.convert_extended_point_id(p_id) for p_id in model.ids],
)

@classmethod
def convert_strict_mode_config(cls, model: rest.StrictModeConfig) -> grpc.StrictModeConfig:
return grpc.StrictModeConfig(
enabled=model.enabled,
max_query_limit=model.max_query_limit,
max_timeout=model.max_timeout,
unindexed_filtering_retrieve=model.unindexed_filtering_retrieve,
unindexed_filtering_update=model.unindexed_filtering_update,
search_max_hnsw_ef=model.search_max_hnsw_ef,
search_allow_exact=model.search_allow_exact,
search_max_oversampling=model.search_max_oversampling,
upsert_max_batchsize=model.upsert_max_batchsize,
max_collection_vector_size_bytes=model.max_collection_vector_size_bytes,
read_rate_limit=model.read_rate_limit,
write_rate_limit=model.write_rate_limit,
max_collection_payload_size_bytes=model.max_collection_payload_size_bytes,
)
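
A hedged round-trip sketch for the reverse direction (the class names GrpcToRest / RestToGrpc are assumed to match the rest of this module). No HasField guards are needed here: unset REST fields are None, and the protobuf constructor ignores keyword arguments whose value is None, so they stay unset on the gRPC side:

from qdrant_client.http import models as rest  # assumed import path
from qdrant_client.conversions.conversion import GrpcToRest, RestToGrpc

rest_config = rest.StrictModeConfig(enabled=True, upsert_max_batchsize=64)
grpc_config = RestToGrpc.convert_strict_mode_config(rest_config)

assert grpc_config.HasField("enabled") and grpc_config.enabled is True
assert not grpc_config.HasField("max_timeout")  # was None on the REST model
# Converting back should reproduce the original REST model, unset fields included.
assert GrpcToRest.convert_strict_mode_config(grpc_config) == rest_config
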
17 changes: 17 additions & 0 deletions tests/conversions/fixtures.py
@@ -1362,6 +1362,22 @@
ids=[point_id_1, point_id_2],
)

strict_mode_config = grpc.StrictModeConfig(
enabled=True,
max_query_limit=100,
max_timeout=10,
unindexed_filtering_retrieve=False,
unindexed_filtering_update=False,
search_max_hnsw_ef=256,
search_allow_exact=False,
search_max_oversampling=10,
upsert_max_batchsize=64,
max_collection_vector_size_bytes=1024 * 1024 * 1024,
# read_rate_limit is intentionally left unset to cover the "field not set" path
write_rate_limit=2000,
max_collection_payload_size_bytes=10 * 1024 * 1024 * 1024,
)
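
# Illustrative sketch only (not part of this commit): the fixture above leaves
# read_rate_limit unset so a round trip through both converters covers the
# "field missing" branch; the helper name and imports below are assumptions.
def _roundtrip_strict_mode_config() -> None:
    from qdrant_client.conversions.conversion import GrpcToRest, RestToGrpc

    rest_model = GrpcToRest.convert_strict_mode_config(strict_mode_config)
    assert rest_model.read_rate_limit is None  # the field left unset above
    restored = RestToGrpc.convert_strict_mode_config(rest_model)
    assert restored == strict_mode_config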


fixtures = {
"CollectionParams": [collection_params, collection_params_2],
@@ -1523,6 +1539,7 @@
"HealthCheckReply": [health_check_reply],
"SearchMatrixPairs": [search_matrix_pairs],
"SearchMatrixOffsets": [search_matrix_offsets],
"StrictModeConfig": [strict_mode_config],
}

