From ace169ac46623678c97f6cce3b131894365db344 Mon Sep 17 00:00:00 2001 From: Fangyin Cheng Date: Tue, 18 Jun 2024 11:11:43 +0800 Subject: [PATCH] fix(core): Fix AWEL branch bug (#1640) --- dbgpt/app/scene/operators/app_operator.py | 6 +- dbgpt/core/awel/__init__.py | 2 + dbgpt/core/awel/flow/base.py | 31 +- dbgpt/core/awel/flow/compat.py | 44 +- dbgpt/core/awel/flow/flow_factory.py | 8 - dbgpt/core/awel/operators/base.py | 6 + dbgpt/core/awel/operators/common_operator.py | 40 +- dbgpt/core/awel/runner/local_runner.py | 9 +- dbgpt/core/awel/tests/test_run_dag.py | 2 +- .../core/interface/operators/llm_operator.py | 142 ++++-- dbgpt/core/operators/__init__.py | 2 + dbgpt/rag/embedding/rerank.py | 2 +- dbgpt/rag/operators/embedding.py | 6 +- dbgpt/serve/rag/connector.py | 2 + .../basic_syntax/2.4_branch_operator.md | 4 +- examples/awel/data_analyst_assistant.py | 15 +- examples/awel/simple_chat_history_example.py | 7 +- examples/awel/simple_llm_client_example.py | 7 +- i18n/locales/zh_CN/LC_MESSAGES/dbgpt_app.mo | Bin 324 -> 382 bytes i18n/locales/zh_CN/LC_MESSAGES/dbgpt_app.po | 7 +- i18n/locales/zh_CN/LC_MESSAGES/dbgpt_core.mo | Bin 13152 -> 15271 bytes i18n/locales/zh_CN/LC_MESSAGES/dbgpt_core.po | 439 +++++++++++------- i18n/locales/zh_CN/LC_MESSAGES/dbgpt_model.po | 16 +- i18n/locales/zh_CN/LC_MESSAGES/dbgpt_rag.mo | Bin 7481 -> 9137 bytes i18n/locales/zh_CN/LC_MESSAGES/dbgpt_rag.po | 214 +++++---- i18n/locales/zh_CN/LC_MESSAGES/dbgpt_serve.mo | Bin 711 -> 2607 bytes i18n/locales/zh_CN/LC_MESSAGES/dbgpt_serve.po | 70 ++- .../zh_CN/LC_MESSAGES/dbgpt_storage.mo | Bin 4423 -> 4731 bytes .../zh_CN/LC_MESSAGES/dbgpt_storage.po | 223 +++++---- i18n/locales/zh_CN/LC_MESSAGES/dbgpt_util.mo | Bin 324 -> 807 bytes i18n/locales/zh_CN/LC_MESSAGES/dbgpt_util.po | 35 +- setup.py | 4 + 32 files changed, 866 insertions(+), 477 deletions(-) diff --git a/dbgpt/app/scene/operators/app_operator.py b/dbgpt/app/scene/operators/app_operator.py index 17d21d370..c55e6b179 100644 --- a/dbgpt/app/scene/operators/app_operator.py +++ b/dbgpt/app/scene/operators/app_operator.py @@ -13,8 +13,8 @@ from dbgpt.core.awel import ( DAG, BaseOperator, + BranchJoinOperator, InputOperator, - JoinOperator, MapOperator, SimpleCallDataInputSource, ) @@ -195,9 +195,7 @@ def build_cached_chat_operator( cache_task_name=cache_task_name, ) # Create a join node to merge outputs from the model and cache nodes, just keep the first not empty output - join_task = JoinOperator( - combine_function=lambda model_out, cache_out: cache_out or model_out - ) + join_task = BranchJoinOperator() # Define the workflow structure using the >> operator input_task >> cache_check_branch_task diff --git a/dbgpt/core/awel/__init__.py b/dbgpt/core/awel/__init__.py index ea7dd2f14..04dd43855 100644 --- a/dbgpt/core/awel/__init__.py +++ b/dbgpt/core/awel/__init__.py @@ -17,6 +17,7 @@ from .operators.base import BaseOperator, WorkflowRunner from .operators.common_operator import ( BranchFunc, + BranchJoinOperator, BranchOperator, BranchTaskType, InputOperator, @@ -78,6 +79,7 @@ "ReduceStreamOperator", "TriggerOperator", "MapOperator", + "BranchJoinOperator", "BranchOperator", "InputOperator", "BranchFunc", diff --git a/dbgpt/core/awel/flow/base.py b/dbgpt/core/awel/flow/base.py index 8e9441818..20b2c5006 100644 --- a/dbgpt/core/awel/flow/base.py +++ b/dbgpt/core/awel/flow/base.py @@ -1,4 +1,5 @@ """The mixin of DAGs.""" + import abc import dataclasses import inspect @@ -337,6 +338,9 @@ class Parameter(TypeMetadata, Serializable): value: Optional[Any] = Field( 
None, description="The value of the parameter(Saved in the dag file)" ) + alias: Optional[List[str]] = Field( + None, description="The alias of the parameter(Compatible with old version)" + ) @model_validator(mode="before") @classmethod @@ -398,6 +402,7 @@ def build_from( description: Optional[str] = None, options: Optional[Union[BaseDynamicOptions, List[OptionValue]]] = None, resource_type: ResourceType = ResourceType.INSTANCE, + alias: Optional[List[str]] = None, ): """Build the parameter from the type.""" type_name = type.__qualname__ @@ -419,6 +424,7 @@ def build_from( placeholder=placeholder, description=description or label, options=options, + alias=alias, ) @classmethod @@ -452,7 +458,7 @@ def build_from_ui(cls, data: Dict) -> "Parameter": def to_dict(self) -> Dict: """Convert current metadata to json dict.""" - dict_value = model_to_dict(self, exclude={"options"}) + dict_value = model_to_dict(self, exclude={"options", "alias"}) if not self.options: dict_value["options"] = None elif isinstance(self.options, BaseDynamicOptions): @@ -677,9 +683,18 @@ def get_runnable_parameters( for parameter in self.parameters if not parameter.optional } - current_parameters = { - parameter.name: parameter for parameter in self.parameters - } + current_parameters = {} + current_aliases_parameters = {} + for parameter in self.parameters: + current_parameters[parameter.name] = parameter + if parameter.alias: + for alias in parameter.alias: + if alias in current_aliases_parameters: + raise FlowMetadataException( + f"Alias {alias} already exists in the metadata." + ) + current_aliases_parameters[alias] = parameter + if len(view_required_parameters) < len(current_required_parameters): # TODO, skip the optional parameters. raise FlowParameterMetadataException( @@ -691,12 +706,16 @@ def get_runnable_parameters( ) for view_param in view_parameters: view_param_key = view_param.name - if view_param_key not in current_parameters: + if view_param_key in current_parameters: + current_parameter = current_parameters[view_param_key] + elif view_param_key in current_aliases_parameters: + current_parameter = current_aliases_parameters[view_param_key] + else: raise FlowParameterMetadataException( f"Parameter {view_param_key} not in the metadata." 
) runnable_parameters.update( - current_parameters[view_param_key].to_runnable_parameter( + current_parameter.to_runnable_parameter( view_param.get_typed_value(), resources, key_to_resource_instance ) ) diff --git a/dbgpt/core/awel/flow/compat.py b/dbgpt/core/awel/flow/compat.py index 435139181..a70e223ca 100644 --- a/dbgpt/core/awel/flow/compat.py +++ b/dbgpt/core/awel/flow/compat.py @@ -1,8 +1,29 @@ """Compatibility mapping for flow classes.""" +from dataclasses import dataclass from typing import Dict, Optional -_COMPAT_FLOW_MAPPING: Dict[str, str] = {} + +@dataclass +class _RegisterItem: + """Register item for compatibility mapping.""" + + old_module: str + new_module: str + old_name: str + new_name: Optional[str] = None + after: Optional[str] = None + + def old_cls_key(self) -> str: + """Get the old class key.""" + return f"{self.old_module}.{self.old_name}" + + def new_cls_key(self) -> str: + """Get the new class key.""" + return f"{self.new_module}.{self.new_name}" + + +_COMPAT_FLOW_MAPPING: Dict[str, _RegisterItem] = {} _OLD_AGENT_RESOURCE_MODULE_1 = "dbgpt.serve.agent.team.layout.agent_operator_resource" @@ -11,17 +32,24 @@ def _register( - old_module: str, new_module: str, old_name: str, new_name: Optional[str] = None + old_module: str, + new_module: str, + old_name: str, + new_name: Optional[str] = None, + after_version: Optional[str] = None, ): if not new_name: new_name = old_name - _COMPAT_FLOW_MAPPING[f"{old_module}.{old_name}"] = f"{new_module}.{new_name}" + item = _RegisterItem(old_module, new_module, old_name, new_name, after_version) + _COMPAT_FLOW_MAPPING[item.old_cls_key()] = item def get_new_class_name(old_class_name: str) -> Optional[str]: """Get the new class name for the old class name.""" - new_cls_name = _COMPAT_FLOW_MAPPING.get(old_class_name, None) - return new_cls_name + if old_class_name not in _COMPAT_FLOW_MAPPING: + return None + item = _COMPAT_FLOW_MAPPING[old_class_name] + return item.new_cls_key() _register( @@ -54,3 +82,9 @@ def get_new_class_name(old_class_name: str) -> Optional[str]: _register( _OLD_AGENT_RESOURCE_MODULE_2, _NEW_AGENT_RESOURCE_MODULE, "AWELAgent", "AWELAgent" ) +_register( + "dbgpt.storage.vector_store.connector", + "dbgpt.serve.rag.connector", + "VectorStoreConnector", + after_version="v0.5.8", +) diff --git a/dbgpt/core/awel/flow/flow_factory.py b/dbgpt/core/awel/flow/flow_factory.py index 42c887379..b23633378 100644 --- a/dbgpt/core/awel/flow/flow_factory.py +++ b/dbgpt/core/awel/flow/flow_factory.py @@ -555,14 +555,6 @@ def build(self, flow_panel: FlowPanel) -> DAG: downstream = key_to_downstream.get(operator_key, []) if not downstream: raise ValueError("Branch operator should have downstream.") - if len(downstream) != len(view_metadata.parameters): - raise ValueError( - "Branch operator should have the same number of downstream as " - "parameters." - ) - for i, param in enumerate(view_metadata.parameters): - downstream_key, _, _ = downstream[i] - param.value = key_to_operator_nodes[downstream_key].data.name try: runnable_params = metadata.get_runnable_parameters( diff --git a/dbgpt/core/awel/operators/base.py b/dbgpt/core/awel/operators/base.py index 67edd7f2a..6f852fd97 100644 --- a/dbgpt/core/awel/operators/base.py +++ b/dbgpt/core/awel/operators/base.py @@ -137,6 +137,7 @@ def __init__( task_name: Optional[str] = None, dag: Optional[DAG] = None, runner: Optional[WorkflowRunner] = None, + can_skip_in_branch: bool = True, **kwargs, ) -> None: """Create a BaseOperator with an optional workflow runner. 
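The `can_skip_in_branch` flag introduced above is what lets the runner decide whether a node sitting downstream of a `BranchOperator` may be skipped. A minimal sketch of the intended usage, not part of the patch — the DAG name and task names are illustrative, and the imports follow the AWEL API touched elsewhere in this diff:

import asyncio

from dbgpt.core.awel import (
    DAG,
    BranchJoinOperator,
    BranchOperator,
    InputOperator,
    MapOperator,
    SimpleCallDataInputSource,
)

with DAG("branch_join_sketch"):
    input_task = InputOperator(input_source=SimpleCallDataInputSource())
    odd_task = MapOperator(lambda x: x * 10, task_name="odd_task")
    even_task = MapOperator(lambda x: x * 100, task_name="even_task")
    branch_task = BranchOperator(
        {lambda x: x % 2 == 1: odd_task, lambda x: x % 2 == 0: even_task}
    )
    # BranchJoinOperator defaults to can_skip_in_branch=False and keeps the
    # first non-empty upstream output, so the branch that was not taken
    # cannot skip the join away.
    join_task = BranchJoinOperator()

    input_task >> branch_task >> odd_task >> join_task
    input_task >> branch_task >> even_task >> join_task

print(asyncio.run(join_task.call(call_data=5)))  # expected: 50 (odd path taken)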
@@ -157,6 +158,7 @@ def __init__( self._runner: WorkflowRunner = runner self._dag_ctx: Optional[DAGContext] = None + self._can_skip_in_branch = can_skip_in_branch @property def current_dag_context(self) -> DAGContext: @@ -321,6 +323,10 @@ def current_event_loop_task_id(self) -> int: """Get the current event loop task id.""" return id(asyncio.current_task()) + def can_skip_in_branch(self) -> bool: + """Check if the operator can be skipped in the branch.""" + return self._can_skip_in_branch + def initialize_runner(runner: WorkflowRunner): """Initialize the default runner.""" diff --git a/dbgpt/core/awel/operators/common_operator.py b/dbgpt/core/awel/operators/common_operator.py index 310c9b530..fc2dc098b 100644 --- a/dbgpt/core/awel/operators/common_operator.py +++ b/dbgpt/core/awel/operators/common_operator.py @@ -16,6 +16,7 @@ ReduceFunc, TaskContext, TaskOutput, + is_empty_data, ) from .base import BaseOperator @@ -28,13 +29,16 @@ class JoinOperator(BaseOperator, Generic[OUT]): This node type is useful for combining the outputs of upstream nodes. """ - def __init__(self, combine_function: JoinFunc, **kwargs): + def __init__( + self, combine_function: JoinFunc, can_skip_in_branch: bool = True, **kwargs + ): """Create a JoinDAGNode with a combine function. Args: combine_function: A function that defines how to combine inputs. + can_skip_in_branch(bool): Whether the node can be skipped in a branch. """ - super().__init__(**kwargs) + super().__init__(can_skip_in_branch=can_skip_in_branch, **kwargs) if not callable(combine_function): raise ValueError("combine_function must be callable") self.combine_function = combine_function @@ -57,6 +61,12 @@ async def _do_run(self, dag_ctx: DAGContext) -> TaskOutput[OUT]: curr_task_ctx.set_task_output(join_output) return join_output + async def _return_first_non_empty(self, *inputs): + for data in inputs: + if not is_empty_data(data): + return data + raise ValueError("All inputs are empty") + class ReduceStreamOperator(BaseOperator, Generic[IN, OUT]): """Operator that reduces inputs using a custom reduce function.""" @@ -287,6 +297,32 @@ async def branches(self) -> Dict[BranchFunc[IN], BranchTaskType]: raise NotImplementedError +class BranchJoinOperator(JoinOperator, Generic[OUT]): + """Operator that joins inputs using a custom combine function. + + This node type is useful for combining the outputs of upstream nodes. + """ + + def __init__( + self, + combine_function: Optional[JoinFunc] = None, + can_skip_in_branch: bool = False, + **kwargs, + ): + """Create a JoinDAGNode with a combine function. + + Args: + combine_function: A function that defines how to combine inputs. + can_skip_in_branch(bool): Whether the node can be skipped in a branch( + default True). + """ + super().__init__( + combine_function=combine_function or self._return_first_non_empty, + can_skip_in_branch=can_skip_in_branch, + **kwargs, + ) + + class InputOperator(BaseOperator, Generic[OUT]): """Operator node that reads data from an input source.""" diff --git a/dbgpt/core/awel/runner/local_runner.py b/dbgpt/core/awel/runner/local_runner.py index bebb90ab5..dfe4bc21f 100644 --- a/dbgpt/core/awel/runner/local_runner.py +++ b/dbgpt/core/awel/runner/local_runner.py @@ -2,6 +2,7 @@ This runner will run the workflow in the current process. 
""" + import asyncio import logging import traceback @@ -11,7 +12,7 @@ from ..dag.base import DAGContext, DAGVar from ..operators.base import CALL_DATA, BaseOperator, WorkflowRunner -from ..operators.common_operator import BranchOperator, JoinOperator +from ..operators.common_operator import BranchOperator from ..task.base import SKIP_DATA, TaskContext, TaskState from ..task.task_impl import DefaultInputContext, DefaultTaskContext, SimpleTaskOutput from .job_manager import JobManager @@ -184,14 +185,14 @@ def _skip_current_downstream_by_node_name( return for child in branch_node.downstream: child = cast(BaseOperator, child) - if child.node_name in skip_nodes: + if child.node_name in skip_nodes or child.node_id in skip_node_ids: logger.info(f"Skip node name {child.node_name}, node id {child.node_id}") _skip_downstream_by_id(child, skip_node_ids) def _skip_downstream_by_id(node: BaseOperator, skip_node_ids: Set[str]): - if isinstance(node, JoinOperator): - # Not skip join node + if not node.can_skip_in_branch(): + # Current node can not skip, so skip its downstream return skip_node_ids.add(node.node_id) for child in node.downstream: diff --git a/dbgpt/core/awel/tests/test_run_dag.py b/dbgpt/core/awel/tests/test_run_dag.py index ca9f1c4d2..84ee690ce 100644 --- a/dbgpt/core/awel/tests/test_run_dag.py +++ b/dbgpt/core/awel/tests/test_run_dag.py @@ -130,7 +130,7 @@ def join_func(o1, o2) -> int: even_node = MapOperator( lambda x: 888, task_id="even_node", task_name="even_node_name" ) - join_node = JoinOperator(join_func) + join_node = JoinOperator(join_func, can_skip_in_branch=False) branch_node = BranchOperator( {lambda x: x % 2 == 1: odd_node, lambda x: x % 2 == 0: even_node} ) diff --git a/dbgpt/core/interface/operators/llm_operator.py b/dbgpt/core/interface/operators/llm_operator.py index c716bad66..53e34ffe5 100644 --- a/dbgpt/core/interface/operators/llm_operator.py +++ b/dbgpt/core/interface/operators/llm_operator.py @@ -2,12 +2,13 @@ import dataclasses from abc import ABC -from typing import Any, AsyncIterator, Dict, List, Optional, Union +from typing import Any, AsyncIterator, Dict, List, Optional, Union, cast from dbgpt._private.pydantic import BaseModel from dbgpt.core.awel import ( BaseOperator, BranchFunc, + BranchJoinOperator, BranchOperator, CommonLLMHttpRequestBody, CommonLLMHttpResponseBody, @@ -340,24 +341,7 @@ class LLMBranchOperator(BranchOperator[ModelRequest, ModelRequest]): category=OperatorCategory.LLM, operator_type=OperatorType.BRANCH, description=_("Branch the workflow based on the stream flag of the request."), - parameters=[ - Parameter.build_from( - _("Streaming Task Name"), - "stream_task_name", - str, - optional=True, - default="streaming_llm_task", - description=_("The name of the streaming task."), - ), - Parameter.build_from( - _("Non-Streaming Task Name"), - "no_stream_task_name", - str, - optional=True, - default="llm_task", - description=_("The name of the non-streaming task."), - ), - ], + parameters=[], inputs=[ IOField.build_from( _("Model Request"), @@ -382,7 +366,12 @@ class LLMBranchOperator(BranchOperator[ModelRequest, ModelRequest]): ], ) - def __init__(self, stream_task_name: str, no_stream_task_name: str, **kwargs): + def __init__( + self, + stream_task_name: Optional[str] = None, + no_stream_task_name: Optional[str] = None, + **kwargs, + ): """Create a new LLM branch operator. Args: @@ -390,18 +379,13 @@ def __init__(self, stream_task_name: str, no_stream_task_name: str, **kwargs): no_stream_task_name (str): The name of the non-streaming task. 
""" super().__init__(**kwargs) - if not stream_task_name: - raise ValueError("stream_task_name is not set") - if not no_stream_task_name: - raise ValueError("no_stream_task_name is not set") self._stream_task_name = stream_task_name self._no_stream_task_name = no_stream_task_name async def branches( self, ) -> Dict[BranchFunc[ModelRequest], Union[BaseOperator, str]]: - """ - Return a dict of branch function and task name. + """Return a dict of branch function and task name. Returns: Dict[BranchFunc[ModelRequest], str]: A dict of branch function and task @@ -409,6 +393,18 @@ async def branches( If the predicate function returns True, we will run the corresponding task. """ + if self._stream_task_name and self._no_stream_task_name: + stream_task_name = self._stream_task_name + no_stream_task_name = self._no_stream_task_name + else: + stream_task_name = "" + no_stream_task_name = "" + for node in self.downstream: + task = cast(BaseOperator, node) + if task.streaming_operator: + stream_task_name = node.node_name + else: + no_stream_task_name = node.node_name async def check_stream_true(r: ModelRequest) -> bool: # If stream is true, we will run the streaming task. otherwise, we will run @@ -416,8 +412,8 @@ async def check_stream_true(r: ModelRequest) -> bool: return r.stream return { - check_stream_true: self._stream_task_name, - lambda x: not x.stream: self._no_stream_task_name, + check_stream_true: stream_task_name, + lambda x: not x.stream: no_stream_task_name, } @@ -553,3 +549,93 @@ async def map(self, input_value: str) -> ModelOutput: text=input_value, error_code=500, ) + + +class LLMBranchJoinOperator(BranchJoinOperator[ModelOutput]): + """The LLM Branch Join Operator. + + Decide which output to keep(streaming or non-streaming). + """ + + streaming_operator = True + metadata = ViewMetadata( + label=_("LLM Branch Join Operator"), + name="llm_branch_join_operator", + category=OperatorCategory.LLM, + operator_type=OperatorType.JOIN, + description=_("Just keep the first non-empty output."), + parameters=[], + inputs=[ + IOField.build_from( + _("Streaming Model Output"), + "stream_output", + ModelOutput, + is_list=True, + description=_("The streaming output."), + ), + IOField.build_from( + _("Non-Streaming Model Output"), + "not_stream_output", + ModelOutput, + description=_("The non-streaming output."), + ), + ], + outputs=[ + IOField.build_from( + _("Model Output"), + "output_value", + ModelOutput, + is_list=True, + description=_("The output value of the operator."), + ), + ], + ) + + def __init__(self, **kwargs): + """Create a new LLM branch join operator.""" + super().__init__(**kwargs) + + +class StringBranchJoinOperator(BranchJoinOperator[str]): + """The String Branch Join Operator. + + Decide which output to keep(streaming or non-streaming). 
+ """ + + streaming_operator = True + metadata = ViewMetadata( + label=_("String Branch Join Operator"), + name="string_branch_join_operator", + category=OperatorCategory.COMMON, + operator_type=OperatorType.JOIN, + description=_("Just keep the first non-empty output."), + parameters=[], + inputs=[ + IOField.build_from( + _("Streaming String Output"), + "stream_output", + str, + is_list=True, + description=_("The streaming output."), + ), + IOField.build_from( + _("Non-Streaming String Output"), + "not_stream_output", + str, + description=_("The non-streaming output."), + ), + ], + outputs=[ + IOField.build_from( + _("String Output"), + "output_value", + str, + is_list=True, + description=_("The output value of the operator."), + ), + ], + ) + + def __init__(self, **kwargs): + """Create a new LLM branch join operator.""" + super().__init__(**kwargs) diff --git a/dbgpt/core/operators/__init__.py b/dbgpt/core/operators/__init__.py index f352b6673..b31c0d1c2 100644 --- a/dbgpt/core/operators/__init__.py +++ b/dbgpt/core/operators/__init__.py @@ -8,6 +8,7 @@ BaseLLM, BaseLLMOperator, BaseStreamingLLMOperator, + LLMBranchJoinOperator, LLMBranchOperator, RequestBuilderOperator, ) @@ -32,6 +33,7 @@ "BaseLLM", "LLMBranchOperator", "BaseLLMOperator", + "LLMBranchJoinOperator", "RequestBuilderOperator", "BaseStreamingLLMOperator", "BaseConversationOperator", diff --git a/dbgpt/rag/embedding/rerank.py b/dbgpt/rag/embedding/rerank.py index a6b2dad27..dd01b5d14 100644 --- a/dbgpt/rag/embedding/rerank.py +++ b/dbgpt/rag/embedding/rerank.py @@ -17,7 +17,7 @@ class CrossEncoderRerankEmbeddings(BaseModel, RerankEmbeddings): client: Any #: :meta private: model_name: str = "BAAI/bge-reranker-base" - max_length: int = None # type: ignore + max_length: Optional[int] = None """Max length for input sequences. Longer sequences will be truncated. If None, max length of the model will be used""" """Model name to use.""" diff --git a/dbgpt/rag/operators/embedding.py b/dbgpt/rag/operators/embedding.py index 7054f4a04..da0b1cfd3 100644 --- a/dbgpt/rag/operators/embedding.py +++ b/dbgpt/rag/operators/embedding.py @@ -29,9 +29,10 @@ class EmbeddingRetrieverOperator(RetrieverOperator[Union[str, List[str]], List[C parameters=[ Parameter.build_from( _("Storage Index Store"), - "vector_store_connector", + "index_store", IndexStoreBase, description=_("The vector store connector."), + alias=["vector_store_connector"], ), Parameter.build_from( _("Top K"), @@ -128,9 +129,10 @@ class EmbeddingAssemblerOperator(AssemblerOperator[Knowledge, List[Chunk]]): parameters=[ Parameter.build_from( _("Vector Store Connector"), - "vector_store_connector", + "index_store", IndexStoreBase, description=_("The vector store connector."), + alias=["vector_store_connector"], ), Parameter.build_from( _("Chunk Parameters"), diff --git a/dbgpt/serve/rag/connector.py b/dbgpt/serve/rag/connector.py index cb621b56c..c6ede9d23 100644 --- a/dbgpt/serve/rag/connector.py +++ b/dbgpt/serve/rag/connector.py @@ -56,6 +56,8 @@ def _load_vector_options() -> List[OptionValue]: default=None, ), ], + # Compatible with the old version + alias=["dbgpt.storage.vector_store.connector.VectorStoreConnector"], ) class VectorStoreConnector: """The connector for vector store. 
diff --git a/docs/docs/awel/awel_tutorial/basic_syntax/2.4_branch_operator.md b/docs/docs/awel/awel_tutorial/basic_syntax/2.4_branch_operator.md index 614c1cf89..92ee691a0 100644 --- a/docs/docs/awel/awel_tutorial/basic_syntax/2.4_branch_operator.md +++ b/docs/docs/awel/awel_tutorial/basic_syntax/2.4_branch_operator.md @@ -123,7 +123,7 @@ with DAG("awel_branch_operator") as dag: task = BranchOperator(branches=branch_mapping) even_task = MapOperator(task_name="even_task", map_function=even_func) odd_task = MapOperator(task_name="odd_task", map_function=odd_func) - join_task = JoinOperator(combine_function=combine_function) + join_task = JoinOperator(combine_function=combine_function, can_skip_in_branch=False) input_task >> task >> even_task >> join_task input_task >> task >> odd_task >> join_task @@ -133,6 +133,8 @@ print("=" * 80) print("Second call, input is 6") assert asyncio.run(join_task.call(call_data=6)) == 60 ``` +Note: `can_skip_in_branch` is used to control whether current task can be skipped in the branch. +Set it to `False` to prevent the task from being skipped. And run the following command to execute the code: diff --git a/examples/awel/data_analyst_assistant.py b/examples/awel/data_analyst_assistant.py index 704676e4e..d837f1cfa 100644 --- a/examples/awel/data_analyst_assistant.py +++ b/examples/awel/data_analyst_assistant.py @@ -36,6 +36,7 @@ }' """ + import logging import os from functools import cache @@ -53,12 +54,17 @@ PromptTemplate, SystemPromptTemplate, ) -from dbgpt.core.awel import DAG, HttpTrigger, JoinOperator, MapOperator +from dbgpt.core.awel import ( + DAG, + BranchJoinOperator, + HttpTrigger, + JoinOperator, + MapOperator, +) from dbgpt.core.operators import ( BufferedConversationMapperOperator, HistoryDynamicPromptBuilderOperator, LLMBranchOperator, - RequestBuilderOperator, ) from dbgpt.model.operators import ( LLMOperator, @@ -318,7 +324,6 @@ async def build_model_request( ) prompt_template_load_task = PromptTemplateBuilderOperator() - request_handle_task = RequestBuilderOperator() # Load and store chat history chat_history_load_task = ServePreChatHistoryLoadOperator() @@ -343,9 +348,7 @@ async def build_model_request( ) model_parse_task = MapOperator(lambda out: out.to_dict()) openai_format_stream_task = OpenAIStreamingOutputOperator() - result_join_task = JoinOperator( - combine_function=lambda not_stream_out, stream_out: not_stream_out or stream_out - ) + result_join_task = BranchJoinOperator() trigger >> prompt_template_load_task >> history_prompt_build_task ( diff --git a/examples/awel/simple_chat_history_example.py b/examples/awel/simple_chat_history_example.py index 7e84985f4..6b900fdad 100644 --- a/examples/awel/simple_chat_history_example.py +++ b/examples/awel/simple_chat_history_example.py @@ -55,6 +55,7 @@ """ + import logging from typing import Dict, List, Optional, Union @@ -69,7 +70,7 @@ ModelRequestContext, SystemPromptTemplate, ) -from dbgpt.core.awel import DAG, HttpTrigger, JoinOperator, MapOperator +from dbgpt.core.awel import DAG, BranchJoinOperator, HttpTrigger, MapOperator from dbgpt.core.operators import ( ChatComposerInput, ChatHistoryPromptComposerOperator, @@ -150,9 +151,7 @@ async def build_model_request( ) model_parse_task = MapOperator(lambda out: out.to_dict()) openai_format_stream_task = OpenAIStreamingOutputOperator() - result_join_task = JoinOperator( - combine_function=lambda not_stream_out, stream_out: not_stream_out or stream_out - ) + result_join_task = BranchJoinOperator() req_handle_task = MapOperator( lambda req: 
ChatComposerInput( diff --git a/examples/awel/simple_llm_client_example.py b/examples/awel/simple_llm_client_example.py index d243fb71f..92ad3132b 100644 --- a/examples/awel/simple_llm_client_example.py +++ b/examples/awel/simple_llm_client_example.py @@ -35,12 +35,13 @@ }' """ + import logging from typing import Any, Dict, List, Optional, Union from dbgpt._private.pydantic import BaseModel, Field from dbgpt.core import LLMClient -from dbgpt.core.awel import DAG, HttpTrigger, JoinOperator, MapOperator +from dbgpt.core.awel import DAG, BranchJoinOperator, HttpTrigger, MapOperator from dbgpt.core.operators import LLMBranchOperator, RequestBuilderOperator from dbgpt.model.operators import ( LLMOperator, @@ -94,9 +95,7 @@ async def map(self, input_value: TriggerReqBody) -> Dict[str, Any]: ) model_parse_task = MapOperator(lambda out: out.to_dict()) openai_format_stream_task = OpenAIStreamingOutputOperator() - result_join_task = JoinOperator( - combine_function=lambda not_stream_out, stream_out: not_stream_out or stream_out - ) + result_join_task = BranchJoinOperator() trigger >> request_handle_task >> branch_task branch_task >> llm_task >> model_parse_task >> result_join_task diff --git a/i18n/locales/zh_CN/LC_MESSAGES/dbgpt_app.mo b/i18n/locales/zh_CN/LC_MESSAGES/dbgpt_app.mo index 23e4ad5f0c8b12cef5c446c46fdf6e9202c91726..030273dc5796a29a847b8e1169f4f53927813499 100644 GIT binary patch delta 124 zcmX@Y^p7dwo)F7a1|VPpVi_RT0b*7lwgF-g2moS!APxj#E=C51L?A5)#5q7V5WxXR mKNz?;>ADAmDEJqo<|#M^cy6@dVHARjJ>AprY|1{6C<6dTxe_}7 delta 65 wcmeyzbcD&`o)F7a1|VPrVi_P-0b*t#)&XJ=umEB%prj>`2C0F8jR`!A0Q-golK=n! diff --git a/i18n/locales/zh_CN/LC_MESSAGES/dbgpt_app.po b/i18n/locales/zh_CN/LC_MESSAGES/dbgpt_app.po index 4e289328a..5505757d1 100644 --- a/i18n/locales/zh_CN/LC_MESSAGES/dbgpt_app.po +++ b/i18n/locales/zh_CN/LC_MESSAGES/dbgpt_app.po @@ -8,7 +8,7 @@ msgid "" msgstr "" "Project-Id-Version: PACKAGE VERSION\n" "Report-Msgid-Bugs-To: \n" -"POT-Creation-Date: 2024-03-24 11:28+0800\n" +"POT-Creation-Date: 2024-06-17 00:05+0800\n" "PO-Revision-Date: 2024-03-23 11:22+0800\n" "Last-Translator: Automatically generated\n" "Language-Team: none\n" @@ -17,7 +17,6 @@ msgstr "" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" -#: ../dbgpt/app/dbgpt_server.py:54 ../dbgpt/app/dbgpt_server.py:55 -#, fuzzy +#: ../dbgpt/app/dbgpt_server.py:53 ../dbgpt/app/dbgpt_server.py:54 msgid "DB-GPT Open API" -msgstr "DB-GPT 开放 API" \ No newline at end of file +msgstr "DB-GPT 开放 API" diff --git a/i18n/locales/zh_CN/LC_MESSAGES/dbgpt_core.mo b/i18n/locales/zh_CN/LC_MESSAGES/dbgpt_core.mo index 9fc6427b3a3fc829ff53f86f292f7af7cad6edc4..8e58f044d62495711a0f6303b4f32843be12c263 100644 GIT binary patch delta 5125 zcmZ{mdr*~S9>*WFG!<_kh^Tk~$xu)-(aNN>ZAFtbFR8WVq8x-24jm3jmK`K7r53uc zVj7VaCgwfN2vYaf&NNN^VQZVyW!+uQ0cSFInkIKDVZ18@*L32%Y#z*u_8h>AA80*Avv1C1FB$3sn=2|aKL#0OIalVFV< zKLTTD|H+Pj0QoZ^9&BK)K-r09QXISv_JTPun*B{K75>Zu%OXfH=23`GX0vVA!Jf1a zK_%w|907k1m5~cj8()I_neTXLeGF;o3sa%SuZOaKyV~q;=27Vmy)Xe5L;g%PkLzG9 z90JckrSLqw9)1axk!uN08O(;t;9Qsq%ixEw9%{o?gwGPQ5z67`U{IblQjwvPP#e4j zmAVU-Uqc;9LcB3OU>3}Rli*}n1hruUoC426IXHktz2P*d^?5v$881wQm4nH@Qc_2U zJUjyB*=cw!db_?2yEaxqe9 zFNMnJdZ>*L!Rat~j*1KpC(k;Q@ldz=9?OTJ-s4)^ZiP3}{s{7C2JtTHbsWz_j^7RC z*cvE%TcPZofXc*Qp#uIK;&9N6WON)Ii=k5V6ucQ8gi3uFDg#$wI_&Rp9l8lB;s@Xz z(1EwW^KcrB#~7z*7DHvyZ}}{gz0+`%-v1A%==OdKx#%W?e5pMR>deca46K8r;VZU% z4(iDM3wy#;!o3fUfp@}$9^9mU&F0euX~y6MdYplZ@#kb7e8rlN@@ zPzI`?QnU+lJ>e{DMX*TMsI 
zXyKc7;BBalKDO=8Z2LQ?JJFx;H12_VZL@595>$lKp?;zZp)%{YTnF{q)~NqnQXfEwczRWY$0#ZiI^TH7JMPgPL~DstfOw&Swp*Ot9FIHfM$GQ)D3i4pge7|?XxhNb_i<07f=y=4HbFvICtIcP;bdSQ1c(O{i|$ylWp&W{TYA7 zwp&~V&AU{z@B)}kAEhIT-mxZD7YEjhBhF56_v$&l#4oxG9epNY=<)RlFolWnt^<%8f7Du#Yo<>cceVW zb34-eOlEa=&F|1U)LHN+Qn*4tDmoUGfiBESs0;;H^H_nrNac2vh+aWkkjics-#-he zdC+0>BASJ|q0X|2%09%0&yeZJKOS>n*K!99jx=bdQaOlv+RUYNYJtS&0^)rI8*kBh<%8 zr4b!Li_kK30QEs5(Gy5T7q$wepkkDaR0@%Pu#Tc7=y4Q$hzD<5q^z+lg?HNeQoU%a>p9!$@|&(|rQo%7ABur_{R&+bg`K<-cfetn*GnL983QM>!`K$ zUOelZ(Zyasft7og`AQ4Q-Kk28JotY}jLc%t&&o1q+wkF6M~pj%l4dvyV&j^6C4JjH z;bPV9_U(<~*VcyKtm!!Igf_OcAK4i?yw&NEa$;!cnGKjXPQ$LAv($fw`hfh8kUVAij_}Nf(W7GR-YonZ; zQSnVVqn30_aXZ_0uXE>^aO2)k{pPL?ICbgC&eF6L=T!Re0kdZ1c|uQa2yZ`X_dTC} zn+6BBH?_96zF>_^oioGJe(-2}^&3u3X0kJW^qC~rZpYEr!^f-H?I*4}mqup|tJ!w3 zYOQ;Zzn|x^x6_%OmDV(6%&Mqi;kv3&!~SsXv9{KR_NTUlcO1JeawF6KRaNaYWh@!$ zMii=RbSrJmhrf@`>6bYtj;+JZ&FYJU>So!wH*<^gV&)JhoSEZXGj`?3w$^8?p-p=_ zPMzvlyD8Lgz&!w#y1_S5X1 wm*t83NkYQS2RJfka%Sq#&@Uyzjn-+-8X4Vja(#H`N&YVGJkj(~*7|P$1EDdt7ytkO delta 3462 zcmYk-3v7*N9LMqJROu8gN89y+wyM>-o~natF?U5VcSDD{&voiD#xbi=cYTP3jLeJ> zMsjSkZZ;B|7_ynlM9h$}>|$oIB}-iP{hjj=PtO1SJkRsK?{j;f_dWX-UY{Je=8fEH zNIEniPDL7X8fVnxKw4efnE6ND#>8PeOu>Gr zex_hUV*;j(%=1+2MqOBmY4|NR!oOU7Y^*U}%1w|e(;n+#FI2-LQTLZ(JeH&8unRTd z{iyptcAmjl`Zrg})aAq;$yA!?~+AxmY}VoThQm+=Qw2S*r%8a#m- zU=?a0H&82e7qvoQP+8SK4eXkc-CJd&8LFCOm9?2qfzyfQ5}||I$VdkZWn3> zLDUQ@kwr9T-1#f6{!dr`5c_aGCc&6Y9D!Q#r3tKmI+<-$Xr`Z{AAiT@m`?Ayp%^ug zc^ovewWt}qgBsW&Ov4k{7H?oXOh^oui?K1~6{r>Y0DI%P02wtD$8faCa-2n|*Jv7c zzzwJ+JdJuQZlMMk$%fG*@S*M>ikd(vY9I$t6Z{D~qKA#3l`2HWXadD#a>z_Vb+{3A z<5BF1zoOpr*6hS?I0LmJhfxh&!lrl!wRhqfrRDyw9@O=fs;7T*(OqyGb>n}oT!ZJK4w6s}=eTkq>KXQT<*}%@VWumWq9#;^`hdNU zTEWB4lc=}mA_mmqO)~1}E~=x)PA?C-8s$t>hdHPf>WHkSnSfks%1}$YA2qWo)Bx_d z^R?OXUsBFR^>+`|-=i$nUllR5DpQ=Y)$wI2^k;DkHKV(z8OG7NZfu9z%|)o|$GZCIuDsNh*ErvG z^+#O!IM(F8?@%+pf|}rO0Wun))=v#5VKio87tBSCcrNOOO{lkGFSf>$$j{v4;KK%d z6!iKPqpn|q>UcBineRXi-~g)MKqZ+xG8dguEy4{IpjKiAvf<5U)N6Va)j$NjXhpoJ zhO$rt?SdLmiK{Pn?soN`A)gd;J6sg4!E7ZNe6!1~w4Y(NxrR%kV`!kD7TZ%dM3yKo9*tP3v54_0yVghPNe@7IG{l zRuLLlT|#Mkn9v)dY!sm{ih5p(T6A^A_*le{m@X(n72`zwl6Wn)y>0l*S~p_!b)PcJNblOWFm-{ zh&VzUQJ>(MgwhK{4l#$&pNp2=OH3q|5`JPFp_CBD@DxI68OJT*`SY(wVLtI3(SaC4 zbRv{qBf5uj#wL4NB~oh70W94L8~?jRsewmIODXUhvpO=`HQq z^x1Y$;|}(5<4jwX(aWAnO9-yccv#KuY#Ov}bG_&g04RlDe{}L2B1Y58!fnAJAmep9ed_AXGr>AwN^aMVWXTDnm8W)<%nTcHBFnnnJ#@pBYv zrhmdQFcW1bm=Ez~7DF}N3aFEP0A=8>@Q3gVNH?3_6kc{lLrj^4up10PWpoRqf6Y5k z`%fg86fpT1s@6Gp)rm(yWneCpftR3O!#z+XJ7{?n%5XIt0)GQlntwwDlFdz(Yy?y$ zCPO)%3UO%?E)$)24b+LwLZ$Kw911^!*fOmsSep!timV5acX2pWO~*s+%eU*rmO;yM z%a@>fWs|>7n4dFgisAs2fkTjfG3TI8eibS(&+==_<{U#Q?gYof{!ke!fpSn~xgW~z zdzQbp`ai?odjD_P8_oF&Ic)3(ABSV00$2i-%5`uc+-uh#TY6AE(u~bYX*N`<2SEig z6zTxuEoWN&Vy*FS%9tqS>mYq(wpt#5GIR?57@mg;pMeTw zEmQ{If;#bWr~~{FDl;j&ZbBNQ?`)ZuV4_GTL!D#}oB~63eFiFJpFn=*rqws&FO&l6 z0d=B*mcyYOjDthr9H_Tq6I5U|Q2RfHDs|#I6P>_=3ZxmWKN5C?%E(kG{Q{^jT@cE_ zUYG$3k>`*iL}MK?8gUSfMLtZaSy6dG5Aunjc$VFD`{a?Z48Ds{s5SfjPMKqLwC(|?k zFPmP&2giIC1&E8RNBESOQHX}JrP0Ah`l^zI?ONZG=kcHZi=<{6VQ(ye>VfHqzTLci z>5

rO{Z0v_fLYVq^@Wp;F1FdWDLbC5T1G01O8MChMKe~vJ@GBXbeVXA$kK=B9oA*h{lUP5+=`|nBj0TG6Z?l z>Sn?SqOX^RUa`K&T;y4#C8AM@lp*twyJI<%wRX`2@@X;45H+uvSdi~bbyu5-$qTIx z11%XvgFy=-Bv0x;8&w-hqerSds z@~7$N?@ygr#EF-?d1gjDROm*X$WkZjmdDFt<;mY?_U|-vr5h~^EO!dsScU6`ouZO> zxHS2%%xtG18VN0rIc0(HvUp&bYaUwXC7ZV$JaU#>5hW;R=5hrViJE8QZgjO%6bZ-N zRWZAY+D#F=DK9yw?dUeqK)BRN$C)f@JFA0lDMkn@FcEaiYfrbGm(e~F#kZ3mE_PQr zvvH7Y(e8y7lgfgmvvy&-!X{m9PMtt`xf?1h(+RDcu!NaxPJN+`FRchD!sqH6I7m^=l zkL{;tQpXN@2ex@t+v|_Mf2(psy5SzuCGTVh9#8MxSLq!&+VD|z-NnPH69=(n9^9H- zkTWn-{toQpR^z6f$qhOEpY!(bt~oIeEGDCwF_*RVMZJyA3sm>dtMgJGTu( zb}eTc_xM||ki3yIsvWx2j-PoKcKUHAyL6tCck$iSn>Bvs$mH~eJDjBPPL fE9rC3c`y?_Wq(}#>CN% z!+7+dp36gZU5u-Z37WF`ifO7k*-ZCk`z;k+i@Ry@Qm&=z8RvT0YZ%|uz9fL~U3>6I+NpCU*weno_Vl66V0egP{mC7MZ!5@$= z%pcZS)I^pts;p1Fdxn+&w+dm>ibyQ-t-X37$mta~2=QLDaxMBQJ!xk6PF=R;vl;V^AyKX)iQeU$UM; zb$9_a^AUUkzq9RRJ|4O$2Kod*nZtir&pb z#C9T!&_U8SJC9f>`>8xlY_S(~ICbLne@BG9mXBH?nGTmKe)SpP-;LVBg%b4IS|#ex z)Z2EObrI-e#~hvE_s zxSV52jiK8~2P2$@6?c=5baox<@VD_)TT7s&r>nQS)gSV#I_Gl6Q^rE?uO4(c2U0gV zBdN{KW_PyJ;m&kI?v>76cb>D?ljpqcsR-Tiw7Z-pZ(HbluPeftN}EfVIseg}ODCrL NFHgTUH2v8b^FO~^{F?v( diff --git a/i18n/locales/zh_CN/LC_MESSAGES/dbgpt_rag.po b/i18n/locales/zh_CN/LC_MESSAGES/dbgpt_rag.po index 359b0d3d0..2634d1318 100644 --- a/i18n/locales/zh_CN/LC_MESSAGES/dbgpt_rag.po +++ b/i18n/locales/zh_CN/LC_MESSAGES/dbgpt_rag.po @@ -8,7 +8,7 @@ msgid "" msgstr "" "Project-Id-Version: PACKAGE VERSION\n" "Report-Msgid-Bugs-To: \n" -"POT-Creation-Date: 2024-03-24 22:53+0800\n" +"POT-Creation-Date: 2024-06-17 00:05+0800\n" "PO-Revision-Date: 2024-03-23 11:22+0800\n" "Last-Translator: Automatically generated\n" "Language-Team: none\n" @@ -26,96 +26,99 @@ msgid "Retrieve candidates from vector store." msgstr "从向量存储中检索候选项。" #: ../dbgpt/rag/operators/embedding.py:31 -#: ../dbgpt/rag/operators/embedding.py:130 -msgid "Vector Store Connector" -msgstr "向量存储连接器" +msgid "Storage Index Store" +msgstr "索引存储" #: ../dbgpt/rag/operators/embedding.py:34 -#: ../dbgpt/rag/operators/embedding.py:133 +#: ../dbgpt/rag/operators/embedding.py:134 msgid "The vector store connector." msgstr "向量存储连接器。" -#: ../dbgpt/rag/operators/embedding.py:37 +#: ../dbgpt/rag/operators/embedding.py:38 msgid "Top K" msgstr "前 K 个" -#: ../dbgpt/rag/operators/embedding.py:40 +#: ../dbgpt/rag/operators/embedding.py:41 msgid "The number of candidates." msgstr "候选项的数量。" -#: ../dbgpt/rag/operators/embedding.py:43 +#: ../dbgpt/rag/operators/embedding.py:44 msgid "Score Threshold" msgstr "分数阈值" -#: ../dbgpt/rag/operators/embedding.py:47 +#: ../dbgpt/rag/operators/embedding.py:48 msgid "" "The score threshold, if score of candidate is less than it, it will be " "filtered." msgstr "分数阈值,如果候选项的分数低于此值,则会被过滤。" -#: ../dbgpt/rag/operators/embedding.py:54 ../dbgpt/rag/retriever/rewrite.py:24 +#: ../dbgpt/rag/operators/embedding.py:55 ../dbgpt/rag/retriever/rewrite.py:24 msgid "Query Rewrite" msgstr "查询重写" -#: ../dbgpt/rag/operators/embedding.py:57 +#: ../dbgpt/rag/operators/embedding.py:58 msgid "The query rewrite resource." msgstr "查询重写资源。" -#: ../dbgpt/rag/operators/embedding.py:62 +#: ../dbgpt/rag/operators/embedding.py:63 msgid "Rerank" msgstr "重新排序" -#: ../dbgpt/rag/operators/embedding.py:65 +#: ../dbgpt/rag/operators/embedding.py:66 msgid "The rerank." msgstr "重新排序。" -#: ../dbgpt/rag/operators/embedding.py:72 +#: ../dbgpt/rag/operators/embedding.py:73 msgid "Query" msgstr "查询" -#: ../dbgpt/rag/operators/embedding.py:75 +#: ../dbgpt/rag/operators/embedding.py:76 msgid "The query to retrieve." 
msgstr "要检索的查询。" -#: ../dbgpt/rag/operators/embedding.py:80 +#: ../dbgpt/rag/operators/embedding.py:81 msgid "Candidates" msgstr "候选项" -#: ../dbgpt/rag/operators/embedding.py:83 +#: ../dbgpt/rag/operators/embedding.py:84 msgid "The retrieved candidates." msgstr "已检索的候选项。" -#: ../dbgpt/rag/operators/embedding.py:124 +#: ../dbgpt/rag/operators/embedding.py:125 msgid "Embedding Assembler Operator" msgstr "嵌入式装配算子" -#: ../dbgpt/rag/operators/embedding.py:126 +#: ../dbgpt/rag/operators/embedding.py:127 msgid "Load knowledge and assemble embedding chunks to vector store." msgstr "加载知识并将嵌入式块组装到向量存储中。" -#: ../dbgpt/rag/operators/embedding.py:136 ../dbgpt/rag/chunk_manager.py:24 +#: ../dbgpt/rag/operators/embedding.py:131 +msgid "Vector Store Connector" +msgstr "向量存储连接器" + +#: ../dbgpt/rag/operators/embedding.py:138 ../dbgpt/rag/chunk_manager.py:24 msgid "Chunk Parameters" msgstr "块参数" -#: ../dbgpt/rag/operators/embedding.py:139 +#: ../dbgpt/rag/operators/embedding.py:141 msgid "The chunk parameters." msgstr "块参数。" -#: ../dbgpt/rag/operators/embedding.py:146 ../dbgpt/rag/operators/summary.py:23 +#: ../dbgpt/rag/operators/embedding.py:148 ../dbgpt/rag/operators/summary.py:23 #: ../dbgpt/rag/operators/knowledge.py:39 msgid "Knowledge" msgstr "知识" -#: ../dbgpt/rag/operators/embedding.py:149 +#: ../dbgpt/rag/operators/embedding.py:151 msgid "The knowledge to be loaded." msgstr "" -#: ../dbgpt/rag/operators/embedding.py:154 +#: ../dbgpt/rag/operators/embedding.py:156 #: ../dbgpt/rag/operators/knowledge.py:121 msgid "Chunks" msgstr "块" -#: ../dbgpt/rag/operators/embedding.py:158 +#: ../dbgpt/rag/operators/embedding.py:160 msgid "The assembled chunks, it has been persisted to vector store." msgstr "已组装的块,已持久化到向量存储中。" @@ -124,18 +127,16 @@ msgid "Summary Operator" msgstr "" #: ../dbgpt/rag/operators/summary.py:20 -#, fuzzy msgid "The summary assembler operator." -msgstr "嵌入式装配算子" +msgstr "总结装备算子" #: ../dbgpt/rag/operators/summary.py:23 -#, fuzzy msgid "Knowledge datasource" msgstr "知识数据源" #: ../dbgpt/rag/operators/summary.py:28 msgid "Document summary" -msgstr "" +msgstr "总结文档" #: ../dbgpt/rag/operators/summary.py:36 ../dbgpt/rag/operators/rewrite.py:36 #: ../dbgpt/rag/retriever/rewrite.py:36 @@ -143,82 +144,73 @@ msgid "LLM Client" msgstr "LLM 客户端" #: ../dbgpt/rag/operators/summary.py:41 ../dbgpt/rag/operators/rewrite.py:39 -#, fuzzy msgid "The LLM Client." msgstr "LLM 客户端" #: ../dbgpt/rag/operators/summary.py:44 ../dbgpt/rag/operators/rewrite.py:42 -#, fuzzy msgid "Model name" msgstr "模型名称" #: ../dbgpt/rag/operators/summary.py:49 -#, fuzzy msgid "LLM model name" msgstr "LLM 模型名称。" #: ../dbgpt/rag/operators/summary.py:52 ../dbgpt/rag/operators/summary.py:57 #: ../dbgpt/rag/operators/summary.py:65 -#, fuzzy msgid "prompt language" -msgstr "语言" +msgstr "prompt 语言" #: ../dbgpt/rag/operators/summary.py:60 msgid "Max iteration with LLM" -msgstr "" +msgstr "LLM 最大迭代次数" #: ../dbgpt/rag/operators/summary.py:68 msgid "Concurrency limit with LLM" -msgstr "" +msgstr "LLM 并发限制" #: ../dbgpt/rag/operators/summary.py:73 msgid "The concurrency limit with llm" msgstr "" #: ../dbgpt/rag/operators/rewrite.py:16 -#, fuzzy msgid "Query Rewrite Operator" -msgstr "查询重写" +msgstr "查询重写算子" #: ../dbgpt/rag/operators/rewrite.py:19 -#, fuzzy msgid "Query rewrite operator." 
-msgstr "查询重写。" +msgstr "查询重写算子。" #: ../dbgpt/rag/operators/rewrite.py:22 -#, fuzzy msgid "Query context" -msgstr "查询重写" +msgstr "查询上下文" #: ../dbgpt/rag/operators/rewrite.py:22 msgid "query context" -msgstr "" +msgstr "查询上下文" #: ../dbgpt/rag/operators/rewrite.py:27 ../dbgpt/rag/operators/rewrite.py:31 msgid "Rewritten queries" -msgstr "" +msgstr "重写后的查询" #: ../dbgpt/rag/operators/rewrite.py:47 -#, fuzzy msgid "LLM model name." msgstr "LLM 模型名称。" #: ../dbgpt/rag/operators/rewrite.py:50 -#, fuzzy msgid "Prompt language" -msgstr "语言" +msgstr "Prompt 语言。" #: ../dbgpt/rag/operators/rewrite.py:55 msgid "Prompt language." -msgstr "" +msgstr "Prompt 语言。" #: ../dbgpt/rag/operators/rewrite.py:58 msgid "Number of results" -msgstr "" +msgstr "结果数量" #: ../dbgpt/rag/operators/rewrite.py:63 msgid "rewrite query number." -msgstr "" +msgstr "重写查询数量。" #: ../dbgpt/rag/operators/knowledge.py:23 msgid "Knowledge Operator" @@ -234,7 +226,7 @@ msgstr "知识数据源" #: ../dbgpt/rag/operators/knowledge.py:34 msgid "knowledge datasource, which can be a document, url, or text." -msgstr "" +msgstr "知识数据源,可以是文档、网址或文本。" #: ../dbgpt/rag/operators/knowledge.py:42 msgid "Knowledge object." @@ -267,7 +259,8 @@ msgstr "将块转换为字符串。" #: ../dbgpt/rag/operators/knowledge.py:111 ../dbgpt/rag/chunk_manager.py:71 #: ../dbgpt/rag/text_splitter/text_splitter.py:211 #: ../dbgpt/rag/text_splitter/text_splitter.py:422 -#: ../dbgpt/rag/text_splitter/text_splitter.py:804 +#: ../dbgpt/rag/text_splitter/text_splitter.py:821 +#: ../dbgpt/rag/text_splitter/text_splitter.py:864 msgid "Separator" msgstr "分隔符" @@ -287,81 +280,81 @@ msgstr "字符串" msgid "The output string." msgstr "输出的字符串。" -#: ../dbgpt/rag/embedding/embedding_factory.py:230 +#: ../dbgpt/rag/embedding/embedding_factory.py:250 msgid "Default Embeddings" msgstr "默认嵌入式" -#: ../dbgpt/rag/embedding/embedding_factory.py:234 +#: ../dbgpt/rag/embedding/embedding_factory.py:254 msgid "Default embeddings(using default embedding model of current system)" msgstr "默认嵌入式(使用当前系统的默认嵌入式模型)" -#: ../dbgpt/rag/embedding/embeddings.py:28 +#: ../dbgpt/rag/embedding/embeddings.py:27 msgid "HuggingFace Embeddings" msgstr "HuggingFace 嵌入式" -#: ../dbgpt/rag/embedding/embeddings.py:31 +#: ../dbgpt/rag/embedding/embeddings.py:30 msgid "HuggingFace sentence_transformers embedding models." msgstr "HuggingFace 句子转换嵌入式模型。" -#: ../dbgpt/rag/embedding/embeddings.py:34 -#: ../dbgpt/rag/embedding/embeddings.py:141 -#: ../dbgpt/rag/embedding/embeddings.py:348 +#: ../dbgpt/rag/embedding/embeddings.py:33 +#: ../dbgpt/rag/embedding/embeddings.py:139 +#: ../dbgpt/rag/embedding/embeddings.py:346 #: ../dbgpt/rag/embedding/embeddings.py:461 -#: ../dbgpt/rag/embedding/embeddings.py:553 +#: ../dbgpt/rag/embedding/embeddings.py:566 #: ../dbgpt/rag/retriever/rewrite.py:30 msgid "Model Name" msgstr "模型名称" -#: ../dbgpt/rag/embedding/embeddings.py:39 -#: ../dbgpt/rag/embedding/embeddings.py:146 +#: ../dbgpt/rag/embedding/embeddings.py:38 +#: ../dbgpt/rag/embedding/embeddings.py:144 msgid "Model name to use." msgstr "要使用的模型名称。" -#: ../dbgpt/rag/embedding/embeddings.py:135 +#: ../dbgpt/rag/embedding/embeddings.py:133 msgid "HuggingFace Instructor Embeddings" msgstr "HuggingFace 指导嵌入式" -#: ../dbgpt/rag/embedding/embeddings.py:138 +#: ../dbgpt/rag/embedding/embeddings.py:136 msgid "HuggingFace Instructor embeddings." 
msgstr "HuggingFace 指导嵌入式。" -#: ../dbgpt/rag/embedding/embeddings.py:149 +#: ../dbgpt/rag/embedding/embeddings.py:147 msgid "Embed Instruction" msgstr "嵌入指令" -#: ../dbgpt/rag/embedding/embeddings.py:154 +#: ../dbgpt/rag/embedding/embeddings.py:152 msgid "Instruction to use for embedding documents." msgstr "用于嵌入文档的指令。" -#: ../dbgpt/rag/embedding/embeddings.py:157 +#: ../dbgpt/rag/embedding/embeddings.py:155 msgid "Query Instruction" msgstr "查询指令" -#: ../dbgpt/rag/embedding/embeddings.py:162 +#: ../dbgpt/rag/embedding/embeddings.py:160 msgid "Instruction to use for embedding query." msgstr "用于嵌入查询的指令。" -#: ../dbgpt/rag/embedding/embeddings.py:336 +#: ../dbgpt/rag/embedding/embeddings.py:334 msgid "HuggingFace Inference API Embeddings" msgstr "HuggingFace 推理 API 嵌入式" -#: ../dbgpt/rag/embedding/embeddings.py:339 +#: ../dbgpt/rag/embedding/embeddings.py:337 msgid "HuggingFace Inference API embeddings." msgstr "HuggingFace 推理 API 嵌入式。" -#: ../dbgpt/rag/embedding/embeddings.py:342 +#: ../dbgpt/rag/embedding/embeddings.py:340 #: ../dbgpt/rag/embedding/embeddings.py:455 -#: ../dbgpt/rag/embedding/embeddings.py:545 +#: ../dbgpt/rag/embedding/embeddings.py:558 msgid "API Key" msgstr "API 密钥" -#: ../dbgpt/rag/embedding/embeddings.py:345 +#: ../dbgpt/rag/embedding/embeddings.py:343 msgid "Your API key for the HuggingFace Inference API." msgstr "您用于 HuggingFace 推理 API 的 API 密钥。" -#: ../dbgpt/rag/embedding/embeddings.py:353 +#: ../dbgpt/rag/embedding/embeddings.py:351 #: ../dbgpt/rag/embedding/embeddings.py:466 -#: ../dbgpt/rag/embedding/embeddings.py:558 +#: ../dbgpt/rag/embedding/embeddings.py:571 msgid "The name of the model to use for text embeddings." msgstr "用于文本嵌入的模型名称。" @@ -377,31 +370,31 @@ msgstr "Jina AI 嵌入式。" msgid "Your API key for the Jina AI API." msgstr "您用于 Jina AI API 的 API 密钥。" -#: ../dbgpt/rag/embedding/embeddings.py:531 +#: ../dbgpt/rag/embedding/embeddings.py:544 msgid "OpenAPI Embeddings" msgstr "OpenAPI 嵌入式" -#: ../dbgpt/rag/embedding/embeddings.py:534 +#: ../dbgpt/rag/embedding/embeddings.py:547 msgid "OpenAPI embeddings." msgstr "OpenAPI 嵌入式。" -#: ../dbgpt/rag/embedding/embeddings.py:537 +#: ../dbgpt/rag/embedding/embeddings.py:550 msgid "API URL" msgstr "API 网址" -#: ../dbgpt/rag/embedding/embeddings.py:542 +#: ../dbgpt/rag/embedding/embeddings.py:555 msgid "The URL of the embeddings API." msgstr "嵌入式 API 的网址。" -#: ../dbgpt/rag/embedding/embeddings.py:550 +#: ../dbgpt/rag/embedding/embeddings.py:563 msgid "Your API key for the Open API." msgstr "您用于 Open API 的 API 密钥。" -#: ../dbgpt/rag/embedding/embeddings.py:561 +#: ../dbgpt/rag/embedding/embeddings.py:574 msgid "Timeout" msgstr "超时时间" -#: ../dbgpt/rag/embedding/embeddings.py:566 +#: ../dbgpt/rag/embedding/embeddings.py:579 msgid "The timeout for the request in seconds." msgstr "请求的超时时间(秒)。" @@ -439,6 +432,7 @@ msgid "Chunk size" msgstr "块大小" #: ../dbgpt/rag/chunk_manager.py:63 +#: ../dbgpt/rag/text_splitter/text_splitter.py:414 msgid "Chunk Overlap" msgstr "块重叠" @@ -454,22 +448,46 @@ msgstr "启用合并" msgid "Enable chunk merge by chunk_size." msgstr "通过块大小启用块合并。" -#: ../dbgpt/rag/retriever/rerank.py:56 +#: ../dbgpt/rag/retriever/rerank.py:88 msgid "Default Ranker" msgstr "默认排序器" -#: ../dbgpt/rag/retriever/rerank.py:59 +#: ../dbgpt/rag/retriever/rerank.py:91 msgid "Default ranker(Rank by score)." 
msgstr "默认排序器(按分数排序)。" -#: ../dbgpt/rag/retriever/rerank.py:62 +#: ../dbgpt/rag/retriever/rerank.py:94 ../dbgpt/rag/retriever/rerank.py:184 msgid "Top k" msgstr "前 k 个" -#: ../dbgpt/rag/retriever/rerank.py:65 +#: ../dbgpt/rag/retriever/rerank.py:97 ../dbgpt/rag/retriever/rerank.py:187 msgid "The number of top k documents." msgstr "前 k 个文档的数量。" +#: ../dbgpt/rag/retriever/rerank.py:178 +msgid "CrossEncoder Rerank" +msgstr "" + +#: ../dbgpt/rag/retriever/rerank.py:181 +msgid "CrossEncoder ranker." +msgstr "" + +#: ../dbgpt/rag/retriever/rerank.py:190 +msgid "Rerank Model" +msgstr "重排序模型" + +#: ../dbgpt/rag/retriever/rerank.py:193 +msgid "rerank model name, e.g., 'BAAI/bge-reranker-base'." +msgstr "重排模型,例如 'BAAI/bge-reranker-base'。" + +#: ../dbgpt/rag/retriever/rerank.py:196 +msgid "device" +msgstr "设备" + +#: ../dbgpt/rag/retriever/rerank.py:199 +msgid "device name, e.g., 'cpu'." +msgstr "设备名称,例如 'cpu'。" + #: ../dbgpt/rag/retriever/rewrite.py:27 msgid "Query rewrite." msgstr "查询重写。" @@ -496,8 +514,8 @@ msgstr "字符文本分割器" #: ../dbgpt/rag/text_splitter/text_splitter.py:214 #: ../dbgpt/rag/text_splitter/text_splitter.py:425 -#: ../dbgpt/rag/text_splitter/text_splitter.py:807 -#: ../dbgpt/rag/text_splitter/text_splitter.py:850 +#: ../dbgpt/rag/text_splitter/text_splitter.py:824 +#: ../dbgpt/rag/text_splitter/text_splitter.py:867 msgid "Separator to split the text." msgstr "用于分割文本的分隔符。" @@ -513,6 +531,10 @@ msgstr "递归按字符分割文本。" msgid "Spacy Text Splitter" msgstr "Spacy 文本分割器" +#: ../dbgpt/rag/text_splitter/text_splitter.py:335 +msgid "Pipeline" +msgstr "" + #: ../dbgpt/rag/text_splitter/text_splitter.py:338 msgid "Spacy pipeline to use for tokenization." msgstr "用于标记化的 Spacy 流水线。" @@ -525,6 +547,10 @@ msgstr "使用 Spacy 按句子分割文本。" msgid "Markdown Header Text Splitter" msgstr "Markdown 标题文本分割器" +#: ../dbgpt/rag/text_splitter/text_splitter.py:398 +msgid "Return Each Line" +msgstr "" + #: ../dbgpt/rag/text_splitter/text_splitter.py:401 msgid "Return each line with associated headers." msgstr "返回每行及其相关标题。" @@ -533,22 +559,26 @@ msgstr "返回每行及其相关标题。" msgid "Size of each chunk." msgstr "每个块的大小。" +#: ../dbgpt/rag/text_splitter/text_splitter.py:417 +msgid "Overlap between chunks." +msgstr "块之间的重叠 token 数。" + #: ../dbgpt/rag/text_splitter/text_splitter.py:430 msgid "Split markdown text by headers." msgstr "通过标题分割 Markdown 文本。" -#: ../dbgpt/rag/text_splitter/text_splitter.py:799 +#: ../dbgpt/rag/text_splitter/text_splitter.py:816 msgid "Separator Text Splitter" msgstr "分隔符文本分割器" -#: ../dbgpt/rag/text_splitter/text_splitter.py:812 +#: ../dbgpt/rag/text_splitter/text_splitter.py:829 msgid "Split text by separator." msgstr "通过分隔符分割文本。" -#: ../dbgpt/rag/text_splitter/text_splitter.py:842 +#: ../dbgpt/rag/text_splitter/text_splitter.py:859 msgid "Page Text Splitter" msgstr "页面文本分割器" -#: ../dbgpt/rag/text_splitter/text_splitter.py:855 +#: ../dbgpt/rag/text_splitter/text_splitter.py:872 msgid "Split text by page." msgstr "按页面分割文本。" diff --git a/i18n/locales/zh_CN/LC_MESSAGES/dbgpt_serve.mo b/i18n/locales/zh_CN/LC_MESSAGES/dbgpt_serve.mo index 5a7dcfe894d589b2c3d8e2f683194b8b497c8f6c..6046d24e6c62706e5e1137f4e23d1d57e2ab6076 100644 GIT binary patch literal 2607 zcmZvc-)|IE6vwYxf33d|!QbK)jmGFKZN-Ev7*pDU4YbsDtuGKW?%r;f?#`?;vz77$ zOi66`yWLiAGW(f(?>YCL z?>T4omz5Pq7{*#WPvd#@HpbS0n{LMs#vkD8;Gf_saNQlswhmm0c?`T8+zZ|Z_JEIr zL*Pr`1h@f1Rn$Y!27`yAjS9<_&oR{NcaB^ zl6?gZ<=`6dQSeRhDX6ZjWMx*G0L_H7`|cY$=yZl8NWif_>8 zDUi;+0@8Wk`n>7y{|!?9SHXy=f#kmhq&Rl^G{F}z?*S=K6F$EK>HeEOZ-M0h4@mK? 
zMpB4tL8LZ9U7T1$iKfMewOv`8&RHK&0x%6m9XFsN53kJNKCNDIY} zHj;ID21E+kBX}UWG@ip7)sb?u0uO0>5>Ii+*lDsk9Zy7gQrNa06E<(R%p_0YC*^QQ zBvT395nMN-Jkp^%yd!Qqrqvx}wKQv1W?|oq)rodJop89^d`p?~?WP{(J5s{Z;mNj` zR#JBy5#^2kjm$SJl5Z2;Y@13ccu{B=LCDaSr8&rGr@fS+VolU~DfML3DY_|se~rhDV#%WMh@uo)-((` zU%K3|#AoSV7}v6QI|M?sxMQLoMwhVAf)HC&iDg@6GRUy)h&>MFptIB}HnmESEnDE0 zfRn&!-1NIJ$Sgs9O--FPI_WY8QkXn|8z^DWe=pgo`&in>F>xc6cGN2x0HsG0?uK5i zA*!_r%SJ*%yt$@!Tg}#b-d5kz+OVT3&>~W%JUZ|x z_7A#e&bveX?(ijVyw{zZ^{xzf`=@{1*Td9axdU66J66b?DR*)QPZj3p^O-SsYJ_>G zuDRES-9ty!Axxd?V_`JI@+Z$0ru*~fXBWmU`#$AM>aP6gNpE1p-$uwBWyPOBey%q+ zcP0O6uRC?j?U~GFr_|jlnnGsAoj-Ex+M&XYzRbEiHRkqZDR7x@ zifuF_4W%OY^U0F5CFdwf<)KTbk&hM*442}~Uz~%o|JiUe)VcE<2dZ9`ueLSia|dxi5eD xw5qV&`z-7BeeIqYbNl<;%r{FaBKK9pNUPGJEI&Hw4O}f;|HSP(>recO{tKpvrceL? delta 171 zcmZ24a-6mPo)F7a1|VPsVi_QI0b+I_&H-W&=m26RAnpX>SRh^l#JoVv#mK-Q1f(T^ zG>EMY#BxBs850A8ACL|O(mX(V36K^A(i?#^PzfBc09imgIT@IN44@ptWM^iT$>*7K aCVylR-fY9h%{aM(U4HT}_L#~4IK%*KtP}kJ diff --git a/i18n/locales/zh_CN/LC_MESSAGES/dbgpt_serve.po b/i18n/locales/zh_CN/LC_MESSAGES/dbgpt_serve.po index 57ca7e2f4..957af5d3b 100644 --- a/i18n/locales/zh_CN/LC_MESSAGES/dbgpt_serve.po +++ b/i18n/locales/zh_CN/LC_MESSAGES/dbgpt_serve.po @@ -8,7 +8,7 @@ msgid "" msgstr "" "Project-Id-Version: PACKAGE VERSION\n" "Report-Msgid-Bugs-To: \n" -"POT-Creation-Date: 2024-03-24 22:53+0800\n" +"POT-Creation-Date: 2024-06-17 00:05+0800\n" "PO-Revision-Date: 2024-03-24 11:24+0800\n" "Last-Translator: Automatically generated\n" "Language-Team: none\n" @@ -19,93 +19,113 @@ msgstr "" #: ../dbgpt/serve/rag/operators/knowledge_space.py:47 msgid "Knowledge Space Operator" -msgstr "" +msgstr "知识空间算子" #: ../dbgpt/serve/rag/operators/knowledge_space.py:50 msgid "knowledge space retriever operator." -msgstr "" +msgstr "知识空间检索算子。" #: ../dbgpt/serve/rag/operators/knowledge_space.py:51 msgid "Query" -msgstr "" +msgstr "查询" #: ../dbgpt/serve/rag/operators/knowledge_space.py:51 msgid "user query" -msgstr "" +msgstr "用户查询" #: ../dbgpt/serve/rag/operators/knowledge_space.py:54 #: ../dbgpt/serve/rag/operators/knowledge_space.py:57 msgid "related chunk content" -msgstr "" +msgstr "相关块内容" #: ../dbgpt/serve/rag/operators/knowledge_space.py:62 msgid "Space Name" -msgstr "" +msgstr "空间名称" #: ../dbgpt/serve/rag/operators/knowledge_space.py:68 msgid "space name." -msgstr "" +msgstr "空间名称。" #: ../dbgpt/serve/rag/operators/knowledge_space.py:143 msgid "Knowledge Space Prompt Builder Operator" -msgstr "" +msgstr "知识空间提示生成算子" #: ../dbgpt/serve/rag/operators/knowledge_space.py:145 msgid "Build messages from prompt template and chat history." -msgstr "" +msgstr "从提示模板和聊天历史构建消息。" #: ../dbgpt/serve/rag/operators/knowledge_space.py:150 msgid "Chat Prompt Template" -msgstr "" +msgstr "聊天提示模板" #: ../dbgpt/serve/rag/operators/knowledge_space.py:153 msgid "The chat prompt template." -msgstr "" +msgstr "聊天提示模板。" #: ../dbgpt/serve/rag/operators/knowledge_space.py:156 msgid "History Key" -msgstr "" +msgstr "历史键" #: ../dbgpt/serve/rag/operators/knowledge_space.py:161 msgid "The key of history in prompt dict." -msgstr "" +msgstr "提示字典中历史的键。" #: ../dbgpt/serve/rag/operators/knowledge_space.py:164 msgid "String History" -msgstr "" +msgstr "字符串历史" #: ../dbgpt/serve/rag/operators/knowledge_space.py:169 msgid "Whether to convert the history to string." 
-msgstr "" +msgstr "是否将历史转换为字符串。" #: ../dbgpt/serve/rag/operators/knowledge_space.py:174 #: ../dbgpt/serve/rag/operators/knowledge_space.py:178 msgid "user input" -msgstr "" +msgstr "用户输入" #: ../dbgpt/serve/rag/operators/knowledge_space.py:181 msgid "space related context" -msgstr "" +msgstr "空间相关上下文" #: ../dbgpt/serve/rag/operators/knowledge_space.py:185 msgid "context of knowledge space." -msgstr "" +msgstr "知识空间的上下文。" #: ../dbgpt/serve/rag/operators/knowledge_space.py:188 msgid "History" -msgstr "" +msgstr "历史" #: ../dbgpt/serve/rag/operators/knowledge_space.py:192 msgid "The history." -msgstr "" +msgstr "历史。" #: ../dbgpt/serve/rag/operators/knowledge_space.py:197 msgid "Formatted Messages" -msgstr "" +msgstr "格式化消息" #: ../dbgpt/serve/rag/operators/knowledge_space.py:201 msgid "The formatted messages." -msgstr "" +msgstr "格式化的消息。" + +#: ../dbgpt/serve/rag/connector.py:39 +msgid "Vector Store Connector" +msgstr "向量存储连接器" + +#: ../dbgpt/serve/rag/connector.py:44 +msgid "Vector Store Type" +msgstr "向量存储类型" + +#: ../dbgpt/serve/rag/connector.py:47 +msgid "The type of vector store." +msgstr "向量存储的类型。" + +#: ../dbgpt/serve/rag/connector.py:51 +msgid "Vector Store Implementation" +msgstr "向量存储实现" + +#: ../dbgpt/serve/rag/connector.py:54 +msgid "The vector store implementation." +msgstr "向量存储的实现。" #: ../dbgpt/serve/conversation/operators.py:87 msgid "Default Chat History Load Operator" @@ -127,8 +147,8 @@ msgstr "这是模型请求。" #: ../dbgpt/serve/conversation/operators.py:105 msgid "Stored Messages" -msgstr "" +msgstr "存储的消息" #: ../dbgpt/serve/conversation/operators.py:108 msgid "The messages stored in the storage." -msgstr "" +msgstr "存储在存储中的消息。" \ No newline at end of file diff --git a/i18n/locales/zh_CN/LC_MESSAGES/dbgpt_storage.mo b/i18n/locales/zh_CN/LC_MESSAGES/dbgpt_storage.mo index 6386397d764b3c60d5216c672680754329844dbc..7d7c33922593fd0a7c4c0f630a8e9b3e37928506 100644 GIT binary patch delta 1576 zcmY+@TSyd97{KwPuIZ|^YGrnHbhUf=QV>KDcF``78QKLO+H}2uxUp+i5UfTw0<#(o z6N{{egh(Z$i&{ZKK14lcPd!AlTQ3q)5d_iyo0;12z?t8iGiT2EzB6aW^M2=tKW4f* z6{V26h}sdS)K*MO;6llEDV2!xaSrC72iIW+ZnN7Jm`S?^W!x!D#mhJwyY2pdlyPG? zOR2DWZ+940hx%ok#Hv{g%tBdk4JP9zWQf{k+eD%zIhBTVsVtN;u&p+7~uy3;UbtnNfpac*?+4>gD z#_PBQ2hiL(lmrcw@jo#a6EfL<$*_PUxgFOdJFoJo@&e<`KSQ#0mU%P^s4J)(AG56H zx|F)sYMR%0t`e}UA%De6s+@@&s#)awYgBf`%m~c0ATE}RT_lV7{~9P+E~iS?Qr1|g zRVbNDc2eYk=26MZ{Eg-F${FG4n8jQ}{`+Nihinh$PI;-xdS^m^cyE>8*I48W`L%7n zCf`wRRIIBc5NdKXOMRixi9oR0*%b&nN0f*BLC4bye^pZ;sO{(CSEa?;^tQ}4bkOB; z>CMTflgoqkN?zBGxHs;qwqwz1c)i)0h3FdmAr+8-B~GnQqKh?;Nhrtc5VfX&))1(z zZ`8Zp-kHal_kYjOsQY`IBkxG(V5F_z7}kv|ZI0MKKOKqm3>!Uz#?2N-w=r-&db`!p z5xqZPwBL1hbo50oKAAq5apmgQ3+={(F5^s}Y=klNDD_){?((GTZ#~T^5t(f(X>0Xc3VXZ}o*D{L# E0=C#NKmY&$ delta 1224 zcmY+@Pe>F|9Ki9Px>l~HyQ}7(thP-pgX|Dp3QSsxgoXUKTH&FkrdrBfSvR9VRwXHl z%$Nct*bWtVs8AFNLh=xKiB1tgV24<(LmjG%bm{w>nS%Y{o6mc{oq6-#Z=Qc@7Xzs! 
zpX;6w4YV5C=^T;$_#~GLF_9;dkCRx253mI1(2K8~@dxxVUPird6$`Q0B~psjsQE_J z=YzOTBqbem_>&$k>oJ0EjN?JPf+uhuyReeg^yu%H# z0z+8HTdVLGHt~K*&}k+ygMR#pEM7_r?1fd}2F5kG5%-|({3sS-1j{jodf!#tiuX|~ zIgf6tx{9n#T$DxcE5st+F9AAQlHHoX1CAlo7an(hzld7OQPc@0o%}TF#0!p#$h+hn z>UHpcoI@@BzGB-!)Cm?agf3RA6SQF^K0>{J-cdYu zK8RY$9@GV%M6J|W)Uz_^q5j$#C&3!zp>xAD>cnqQOZgcOVGebtlGg6QIyXB|zo(uZ z{VdyQ^)wBwo`%k?^J#3NL&AJAH+7V}+lq?neJhG&m;KiLSFo z_CunUC(BB9QZ~&`15jcr0d( zv(qhE4`=_@$r7a7hzvv{Ny8s*59z@CRIt-*bq8jL+`n>We-!=BGwZyb{F#@T^i6Zg zbAwSyYvx&cWo-J((1?WFTV&<#)bjP`k{Q4A?b_|k(}~Q;)Yn((<*~<^$*HWY{a99N V#hy6fZ8Gb<#iq|!YVP$N{tKukSn4Nt$F!IfT))!N9VYN&`|aG?9l`}@%{#m zfgj#`Yio1SFbFSq8NeNm7B^6GqaCO477Gfm4u~dGx*=(roMg$_g&;U&(%BW7QDmn3ooqgx^;0#qz><<7v1Qdz*rM=6c}X%x zXl5lN6Q*Gpn@q6@O=C^x_~UrECC6(|{I2wUz3!Y9#ZvB0iK!Ul^IpAY#LYRVSoIytykSxC)xK)*#;$K*A3oUSJNWxAu>8sw Y$aype`Q;YbrM!Jn^6jjb>c3_F0LHka=Kufz delta 66 xcmZ3^c7(~|o)F7a1|VPrVi_P-0b*t#)&XJ=umEB%prj>`2C0F8&DD%Ni~#H#1zrFE diff --git a/i18n/locales/zh_CN/LC_MESSAGES/dbgpt_util.po b/i18n/locales/zh_CN/LC_MESSAGES/dbgpt_util.po index dc166d346..fe22127c5 100644 --- a/i18n/locales/zh_CN/LC_MESSAGES/dbgpt_util.po +++ b/i18n/locales/zh_CN/LC_MESSAGES/dbgpt_util.po @@ -8,7 +8,7 @@ msgid "" msgstr "" "Project-Id-Version: PACKAGE VERSION\n" "Report-Msgid-Bugs-To: \n" -"POT-Creation-Date: 2024-03-24 22:53+0800\n" +"POT-Creation-Date: 2024-06-17 00:05+0800\n" "PO-Revision-Date: 2024-03-24 22:53+0800\n" "Last-Translator: Automatically generated\n" "Language-Team: none\n" @@ -19,8 +19,37 @@ msgstr "" #: ../dbgpt/util/serialization/json_serialization.py:23 msgid "Json Serializer" -msgstr "" +msgstr "JSON 序列化器" #: ../dbgpt/util/serialization/json_serialization.py:26 msgid "The serializer for serializing data with json format." -msgstr "" +msgstr "用于以 JSON 格式序列化数据的序列化器。" + +#: ../dbgpt/util/dbgpts/repo.py:72 +msgid "Repos" +msgstr "仓库" + +#: ../dbgpt/util/dbgpts/repo.py:73 ../dbgpt/util/dbgpts/repo.py:329 +#: ../dbgpt/util/dbgpts/repo.py:360 +msgid "Repository" +msgstr "存储库" + +#: ../dbgpt/util/dbgpts/repo.py:74 ../dbgpt/util/dbgpts/repo.py:361 +msgid "Path" +msgstr "路径" + +#: ../dbgpt/util/dbgpts/repo.py:327 +msgid "dbgpts In All Repos" +msgstr "所有存储库中的 dbgpts" + +#: ../dbgpt/util/dbgpts/repo.py:330 ../dbgpt/util/dbgpts/repo.py:359 +msgid "Type" +msgstr "类型" + +#: ../dbgpt/util/dbgpts/repo.py:331 ../dbgpt/util/dbgpts/repo.py:358 +msgid "Name" +msgstr "名称" + +#: ../dbgpt/util/dbgpts/repo.py:356 +msgid "Installed dbgpts" +msgstr "已安装的 dbgpts" \ No newline at end of file diff --git a/setup.py b/setup.py index 015a83ded..a6051cdc6 100644 --- a/setup.py +++ b/setup.py @@ -424,6 +424,8 @@ def core_requires(): setup_spec.extras["client"] = setup_spec.extras["core"] + [ "httpx", "fastapi>=0.100.0", + # For retry, chromadb need tenacity<=8.3.0 + "tenacity<=8.3.0", ] # Simple command line dependencies setup_spec.extras["cli"] = setup_spec.extras["client"] + [ @@ -440,6 +442,8 @@ def core_requires(): # https://github.com/eosphoros-ai/DB-GPT/issues/551 # TODO: remove pandas dependency "pandas==2.0.3", + # numpy should less than 2.0.0 + "numpy>=1.21.0,<2.0.0", ] # Just use by DB-GPT internal, we should find the smallest dependency set for run